xref: /linux/fs/xfs/libxfs/xfs_bmap.c (revision 825ec756afeeb082395ac6430e7b07e3a9997665)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_dir2.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_alloc.h"
21 #include "xfs_bmap.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_bmap_btree.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_errortag.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_buf_item.h"
30 #include "xfs_trace.h"
31 #include "xfs_attr_leaf.h"
32 #include "xfs_filestream.h"
33 #include "xfs_rmap.h"
34 #include "xfs_ag.h"
35 #include "xfs_ag_resv.h"
36 #include "xfs_refcount.h"
37 #include "xfs_icache.h"
38 #include "xfs_iomap.h"
39 #include "xfs_health.h"
40 #include "xfs_bmap_item.h"
41 #include "xfs_symlink_remote.h"
42 #include "xfs_inode_util.h"
43 
44 struct kmem_cache		*xfs_bmap_intent_cache;
45 
46 /*
47  * Miscellaneous helper functions
48  */
49 
50 /*
51  * Compute and fill in the value of the maximum depth of a bmap btree
52  * in this filesystem.  Done once, during mount.
53  */
54 void
xfs_bmap_compute_maxlevels(xfs_mount_t * mp,int whichfork)55 xfs_bmap_compute_maxlevels(
56 	xfs_mount_t	*mp,		/* file system mount structure */
57 	int		whichfork)	/* data or attr fork */
58 {
59 	uint64_t	maxblocks;	/* max blocks at this level */
60 	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
61 	int		level;		/* btree level */
62 	int		maxrootrecs;	/* max records in root block */
63 	int		minleafrecs;	/* min records in leaf block */
64 	int		minnoderecs;	/* min records in node block */
65 	int		sz;		/* root block size */
66 
67 	/*
68 	 * The maximum number of extents in a fork, hence the maximum number of
69 	 * leaf entries, is controlled by the size of the on-disk extent count.
70 	 *
71 	 * Note that we can no longer assume that if we are in ATTR1 that the
72 	 * fork offset of all the inodes will be
73 	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
74 	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
75 	 * but probably at various positions. Therefore, for both ATTR1 and
76 	 * ATTR2 we have to assume the worst case scenario of a minimum size
77 	 * available.
78 	 */
79 	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
80 				whichfork);
81 	if (whichfork == XFS_DATA_FORK)
82 		sz = xfs_bmdr_space_calc(MINDBTPTRS);
83 	else
84 		sz = xfs_bmdr_space_calc(MINABTPTRS);
85 
86 	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
87 	minleafrecs = mp->m_bmap_dmnr[0];
88 	minnoderecs = mp->m_bmap_dmnr[1];
89 	maxblocks = howmany_64(maxleafents, minleafrecs);
90 	for (level = 1; maxblocks > 1; level++) {
91 		if (maxblocks <= maxrootrecs)
92 			maxblocks = 1;
93 		else
94 			maxblocks = howmany_64(maxblocks, minnoderecs);
95 	}
96 	mp->m_bm_maxlevels[whichfork] = level;
97 	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
98 }
99 
100 unsigned int
xfs_bmap_compute_attr_offset(struct xfs_mount * mp)101 xfs_bmap_compute_attr_offset(
102 	struct xfs_mount	*mp)
103 {
104 	if (mp->m_sb.sb_inodesize == 256)
105 		return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
106 	return xfs_bmdr_space_calc(6 * MINABTPTRS);
107 }
108 
109 STATIC int				/* error */
xfs_bmbt_lookup_eq(struct xfs_btree_cur * cur,struct xfs_bmbt_irec * irec,int * stat)110 xfs_bmbt_lookup_eq(
111 	struct xfs_btree_cur	*cur,
112 	struct xfs_bmbt_irec	*irec,
113 	int			*stat)	/* success/failure */
114 {
115 	cur->bc_rec.b = *irec;
116 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
117 }
118 
119 STATIC int				/* error */
xfs_bmbt_lookup_first(struct xfs_btree_cur * cur,int * stat)120 xfs_bmbt_lookup_first(
121 	struct xfs_btree_cur	*cur,
122 	int			*stat)	/* success/failure */
123 {
124 	cur->bc_rec.b.br_startoff = 0;
125 	cur->bc_rec.b.br_startblock = 0;
126 	cur->bc_rec.b.br_blockcount = 0;
127 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
128 }
129 
130 /*
131  * Check if the inode needs to be converted to btree format.
132  */
xfs_bmap_needs_btree(struct xfs_inode * ip,int whichfork)133 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
134 {
135 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
136 
137 	return whichfork != XFS_COW_FORK &&
138 		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
139 		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
140 }
141 
142 /*
143  * Check if the inode should be converted to extent format.
144  */
xfs_bmap_wants_extents(struct xfs_inode * ip,int whichfork)145 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
146 {
147 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
148 
149 	return whichfork != XFS_COW_FORK &&
150 		ifp->if_format == XFS_DINODE_FMT_BTREE &&
151 		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
152 }
153 
154 /*
155  * Update the record referred to by cur to the value given by irec
156  * This either works (return 0) or gets an EFSCORRUPTED error.
157  */
158 STATIC int
xfs_bmbt_update(struct xfs_btree_cur * cur,struct xfs_bmbt_irec * irec)159 xfs_bmbt_update(
160 	struct xfs_btree_cur	*cur,
161 	struct xfs_bmbt_irec	*irec)
162 {
163 	union xfs_btree_rec	rec;
164 
165 	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
166 	return xfs_btree_update(cur, &rec);
167 }
168 
169 /*
170  * Compute the worst-case number of indirect blocks that will be used
171  * for ip's delayed extent of length "len".
172  */
173 STATIC xfs_filblks_t
xfs_bmap_worst_indlen(xfs_inode_t * ip,xfs_filblks_t len)174 xfs_bmap_worst_indlen(
175 	xfs_inode_t	*ip,		/* incore inode pointer */
176 	xfs_filblks_t	len)		/* delayed extent length */
177 {
178 	int		level;		/* btree level number */
179 	int		maxrecs;	/* maximum record count at this level */
180 	xfs_mount_t	*mp;		/* mount structure */
181 	xfs_filblks_t	rval;		/* return value */
182 
183 	mp = ip->i_mount;
184 	maxrecs = mp->m_bmap_dmxr[0];
185 	for (level = 0, rval = 0;
186 	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
187 	     level++) {
188 		len += maxrecs - 1;
189 		do_div(len, maxrecs);
190 		rval += len;
191 		if (len == 1)
192 			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
193 				level - 1;
194 		if (level == 0)
195 			maxrecs = mp->m_bmap_dmxr[1];
196 	}
197 	return rval;
198 }
199 
200 /*
201  * Calculate the default attribute fork offset for newly created inodes.
202  */
203 uint
xfs_default_attroffset(struct xfs_inode * ip)204 xfs_default_attroffset(
205 	struct xfs_inode	*ip)
206 {
207 	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
208 		return roundup(sizeof(xfs_dev_t), 8);
209 	return M_IGEO(ip->i_mount)->attr_fork_offset;
210 }
211 
212 /*
213  * Helper routine to reset inode i_forkoff field when switching attribute fork
214  * from local to extent format - we reset it where possible to make space
215  * available for inline data fork extents.
216  */
217 STATIC void
xfs_bmap_forkoff_reset(xfs_inode_t * ip,int whichfork)218 xfs_bmap_forkoff_reset(
219 	xfs_inode_t	*ip,
220 	int		whichfork)
221 {
222 	if (whichfork == XFS_ATTR_FORK &&
223 	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
224 	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
225 		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;
226 
227 		if (dfl_forkoff > ip->i_forkoff)
228 			ip->i_forkoff = dfl_forkoff;
229 	}
230 }
231 
232 static int
xfs_bmap_read_buf(struct xfs_mount * mp,struct xfs_trans * tp,xfs_fsblock_t fsbno,struct xfs_buf ** bpp)233 xfs_bmap_read_buf(
234 	struct xfs_mount	*mp,		/* file system mount point */
235 	struct xfs_trans	*tp,		/* transaction pointer */
236 	xfs_fsblock_t		fsbno,		/* file system block number */
237 	struct xfs_buf		**bpp)		/* buffer for fsbno */
238 {
239 	struct xfs_buf		*bp;		/* return value */
240 	int			error;
241 
242 	if (!xfs_verify_fsbno(mp, fsbno))
243 		return -EFSCORRUPTED;
244 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
245 			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
246 			&xfs_bmbt_buf_ops);
247 	if (!error) {
248 		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
249 		*bpp = bp;
250 	}
251 	return error;
252 }
253 
254 #ifdef DEBUG
255 STATIC struct xfs_buf *
xfs_bmap_get_bp(struct xfs_btree_cur * cur,xfs_fsblock_t bno)256 xfs_bmap_get_bp(
257 	struct xfs_btree_cur	*cur,
258 	xfs_fsblock_t		bno)
259 {
260 	struct xfs_log_item	*lip;
261 	int			i;
262 
263 	if (!cur)
264 		return NULL;
265 
266 	for (i = 0; i < cur->bc_maxlevels; i++) {
267 		if (!cur->bc_levels[i].bp)
268 			break;
269 		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
270 			return cur->bc_levels[i].bp;
271 	}
272 
273 	/* Chase down all the log items to see if the bp is there */
274 	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
275 		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;
276 
277 		if (bip->bli_item.li_type == XFS_LI_BUF &&
278 		    xfs_buf_daddr(bip->bli_buf) == bno)
279 			return bip->bli_buf;
280 	}
281 
282 	return NULL;
283 }
284 
285 STATIC void
xfs_check_block(struct xfs_btree_block * block,xfs_mount_t * mp,int root,short sz)286 xfs_check_block(
287 	struct xfs_btree_block	*block,
288 	xfs_mount_t		*mp,
289 	int			root,
290 	short			sz)
291 {
292 	int			i, j, dmxr;
293 	__be64			*pp, *thispa;	/* pointer to block address */
294 	xfs_bmbt_key_t		*prevp, *keyp;
295 
296 	ASSERT(be16_to_cpu(block->bb_level) > 0);
297 
298 	prevp = NULL;
299 	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
300 		dmxr = mp->m_bmap_dmxr[0];
301 		keyp = xfs_bmbt_key_addr(mp, block, i);
302 
303 		if (prevp) {
304 			ASSERT(be64_to_cpu(prevp->br_startoff) <
305 			       be64_to_cpu(keyp->br_startoff));
306 		}
307 		prevp = keyp;
308 
309 		/*
310 		 * Compare the block numbers to see if there are dups.
311 		 */
312 		if (root)
313 			pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz);
314 		else
315 			pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr);
316 
317 		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
318 			if (root)
319 				thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz);
320 			else
321 				thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr);
322 			if (*thispa == *pp) {
323 				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
324 					__func__, j, i,
325 					(unsigned long long)be64_to_cpu(*thispa));
326 				xfs_err(mp, "%s: ptrs are equal in node\n",
327 					__func__);
328 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
329 			}
330 		}
331 	}
332 }
333 
334 /*
335  * Check that the extents for the inode ip are in the right order in all
336  * btree leaves. THis becomes prohibitively expensive for large extent count
337  * files, so don't bother with inodes that have more than 10,000 extents in
338  * them. The btree record ordering checks will still be done, so for such large
339  * bmapbt constructs that is going to catch most corruptions.
340  */
341 STATIC void
xfs_bmap_check_leaf_extents(struct xfs_btree_cur * cur,xfs_inode_t * ip,int whichfork)342 xfs_bmap_check_leaf_extents(
343 	struct xfs_btree_cur	*cur,	/* btree cursor or null */
344 	xfs_inode_t		*ip,		/* incore inode pointer */
345 	int			whichfork)	/* data or attr fork */
346 {
347 	struct xfs_mount	*mp = ip->i_mount;
348 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
349 	struct xfs_btree_block	*block;	/* current btree block */
350 	xfs_fsblock_t		bno;	/* block # of "block" */
351 	struct xfs_buf		*bp;	/* buffer for "block" */
352 	int			error;	/* error return value */
353 	xfs_extnum_t		i=0, j;	/* index into the extents list */
354 	int			level;	/* btree level, for checking */
355 	__be64			*pp;	/* pointer to block address */
356 	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
357 	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
358 	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
359 	int			bp_release = 0;
360 
361 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
362 		return;
363 
364 	/* skip large extent count inodes */
365 	if (ip->i_df.if_nextents > 10000)
366 		return;
367 
368 	bno = NULLFSBLOCK;
369 	block = ifp->if_broot;
370 	/*
371 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
372 	 */
373 	level = be16_to_cpu(block->bb_level);
374 	ASSERT(level > 0);
375 	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
376 	pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes);
377 	bno = be64_to_cpu(*pp);
378 
379 	ASSERT(bno != NULLFSBLOCK);
380 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
381 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
382 
383 	/*
384 	 * Go down the tree until leaf level is reached, following the first
385 	 * pointer (leftmost) at each level.
386 	 */
387 	while (level-- > 0) {
388 		/* See if buf is in cur first */
389 		bp_release = 0;
390 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
391 		if (!bp) {
392 			bp_release = 1;
393 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
394 			if (xfs_metadata_is_sick(error))
395 				xfs_btree_mark_sick(cur);
396 			if (error)
397 				goto error_norelse;
398 		}
399 		block = XFS_BUF_TO_BLOCK(bp);
400 		if (level == 0)
401 			break;
402 
403 		/*
404 		 * Check this block for basic sanity (increasing keys and
405 		 * no duplicate blocks).
406 		 */
407 
408 		xfs_check_block(block, mp, 0, 0);
409 		pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]);
410 		bno = be64_to_cpu(*pp);
411 		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
412 			xfs_btree_mark_sick(cur);
413 			error = -EFSCORRUPTED;
414 			goto error0;
415 		}
416 		if (bp_release) {
417 			bp_release = 0;
418 			xfs_trans_brelse(NULL, bp);
419 		}
420 	}
421 
422 	/*
423 	 * Here with bp and block set to the leftmost leaf node in the tree.
424 	 */
425 	i = 0;
426 
427 	/*
428 	 * Loop over all leaf nodes checking that all extents are in the right order.
429 	 */
430 	for (;;) {
431 		xfs_fsblock_t	nextbno;
432 		xfs_extnum_t	num_recs;
433 
434 
435 		num_recs = xfs_btree_get_numrecs(block);
436 
437 		/*
438 		 * Read-ahead the next leaf block, if any.
439 		 */
440 
441 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
442 
443 		/*
444 		 * Check all the extents to make sure they are OK.
445 		 * If we had a previous block, the last entry should
446 		 * conform with the first entry in this one.
447 		 */
448 
449 		ep = xfs_bmbt_rec_addr(mp, block, 1);
450 		if (i) {
451 			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
452 			       xfs_bmbt_disk_get_blockcount(&last) <=
453 			       xfs_bmbt_disk_get_startoff(ep));
454 		}
455 		for (j = 1; j < num_recs; j++) {
456 			nextp = xfs_bmbt_rec_addr(mp, block, j + 1);
457 			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
458 			       xfs_bmbt_disk_get_blockcount(ep) <=
459 			       xfs_bmbt_disk_get_startoff(nextp));
460 			ep = nextp;
461 		}
462 
463 		last = *ep;
464 		i += num_recs;
465 		if (bp_release) {
466 			bp_release = 0;
467 			xfs_trans_brelse(NULL, bp);
468 		}
469 		bno = nextbno;
470 		/*
471 		 * If we've reached the end, stop.
472 		 */
473 		if (bno == NULLFSBLOCK)
474 			break;
475 
476 		bp_release = 0;
477 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
478 		if (!bp) {
479 			bp_release = 1;
480 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
481 			if (xfs_metadata_is_sick(error))
482 				xfs_btree_mark_sick(cur);
483 			if (error)
484 				goto error_norelse;
485 		}
486 		block = XFS_BUF_TO_BLOCK(bp);
487 	}
488 
489 	return;
490 
491 error0:
492 	xfs_warn(mp, "%s: at error0", __func__);
493 	if (bp_release)
494 		xfs_trans_brelse(NULL, bp);
495 error_norelse:
496 	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
497 		__func__, i);
498 	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
499 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
500 	return;
501 }
502 
503 /*
504  * Validate that the bmbt_irecs being returned from bmapi are valid
505  * given the caller's original parameters.  Specifically check the
506  * ranges of the returned irecs to ensure that they only extend beyond
507  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
508  */
509 STATIC void
xfs_bmap_validate_ret(xfs_fileoff_t bno,xfs_filblks_t len,uint32_t flags,xfs_bmbt_irec_t * mval,int nmap,int ret_nmap)510 xfs_bmap_validate_ret(
511 	xfs_fileoff_t		bno,
512 	xfs_filblks_t		len,
513 	uint32_t		flags,
514 	xfs_bmbt_irec_t		*mval,
515 	int			nmap,
516 	int			ret_nmap)
517 {
518 	int			i;		/* index to map values */
519 
520 	ASSERT(ret_nmap <= nmap);
521 
522 	for (i = 0; i < ret_nmap; i++) {
523 		ASSERT(mval[i].br_blockcount > 0);
524 		if (!(flags & XFS_BMAPI_ENTIRE)) {
525 			ASSERT(mval[i].br_startoff >= bno);
526 			ASSERT(mval[i].br_blockcount <= len);
527 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
528 			       bno + len);
529 		} else {
530 			ASSERT(mval[i].br_startoff < bno + len);
531 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
532 			       bno);
533 		}
534 		ASSERT(i == 0 ||
535 		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
536 		       mval[i].br_startoff);
537 		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
538 		       mval[i].br_startblock != HOLESTARTBLOCK);
539 		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
540 		       mval[i].br_state == XFS_EXT_UNWRITTEN);
541 	}
542 }
543 
544 #else
545 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
546 #define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
547 #endif /* DEBUG */
548 
549 /*
550  * Inode fork format manipulation functions
551  */
552 
553 /*
554  * Convert the inode format to extent format if it currently is in btree format,
555  * but the extent list is small enough that it fits into the extent format.
556  *
557  * Since the extents are already in-core, all we have to do is give up the space
558  * for the btree root and pitch the leaf block.
559  */
560 STATIC int				/* error */
xfs_bmap_btree_to_extents(struct xfs_trans * tp,struct xfs_inode * ip,struct xfs_btree_cur * cur,int * logflagsp,int whichfork)561 xfs_bmap_btree_to_extents(
562 	struct xfs_trans	*tp,	/* transaction pointer */
563 	struct xfs_inode	*ip,	/* incore inode pointer */
564 	struct xfs_btree_cur	*cur,	/* btree cursor */
565 	int			*logflagsp, /* inode logging flags */
566 	int			whichfork)  /* data or attr fork */
567 {
568 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
569 	struct xfs_mount	*mp = ip->i_mount;
570 	struct xfs_btree_block	*rblock = ifp->if_broot;
571 	struct xfs_btree_block	*cblock;/* child btree block */
572 	xfs_fsblock_t		cbno;	/* child block number */
573 	struct xfs_buf		*cbp;	/* child block's buffer */
574 	int			error;	/* error return value */
575 	__be64			*pp;	/* ptr to block address */
576 	struct xfs_owner_info	oinfo;
577 
578 	/* check if we actually need the extent format first: */
579 	if (!xfs_bmap_wants_extents(ip, whichfork))
580 		return 0;
581 
582 	ASSERT(cur);
583 	ASSERT(whichfork != XFS_COW_FORK);
584 	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
585 	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
586 	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
587 	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1);
588 
589 	pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes);
590 	cbno = be64_to_cpu(*pp);
591 #ifdef DEBUG
592 	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
593 		xfs_btree_mark_sick(cur);
594 		return -EFSCORRUPTED;
595 	}
596 #endif
597 	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
598 	if (xfs_metadata_is_sick(error))
599 		xfs_btree_mark_sick(cur);
600 	if (error)
601 		return error;
602 	cblock = XFS_BUF_TO_BLOCK(cbp);
603 	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
604 		return error;
605 
606 	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
607 	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
608 			XFS_AG_RESV_NONE, 0);
609 	if (error)
610 		return error;
611 
612 	ip->i_nblocks--;
613 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
614 	xfs_trans_binval(tp, cbp);
615 	if (cur->bc_levels[0].bp == cbp)
616 		cur->bc_levels[0].bp = NULL;
617 	xfs_iroot_realloc(ip, -1, whichfork);
618 	ASSERT(ifp->if_broot == NULL);
619 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
620 	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
621 	return 0;
622 }
623 
624 /*
625  * Convert an extents-format file into a btree-format file.
626  * The new file will have a root block (in the inode) and a single child block.
627  */
628 STATIC int					/* error */
xfs_bmap_extents_to_btree(struct xfs_trans * tp,struct xfs_inode * ip,struct xfs_btree_cur ** curp,int wasdel,int * logflagsp,int whichfork)629 xfs_bmap_extents_to_btree(
630 	struct xfs_trans	*tp,		/* transaction pointer */
631 	struct xfs_inode	*ip,		/* incore inode pointer */
632 	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
633 	int			wasdel,		/* converting a delayed alloc */
634 	int			*logflagsp,	/* inode logging flags */
635 	int			whichfork)	/* data or attr fork */
636 {
637 	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
638 	struct xfs_buf		*abp;		/* buffer for ablock */
639 	struct xfs_alloc_arg	args;		/* allocation arguments */
640 	struct xfs_bmbt_rec	*arp;		/* child record pointer */
641 	struct xfs_btree_block	*block;		/* btree root block */
642 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
643 	int			error;		/* error return value */
644 	struct xfs_ifork	*ifp;		/* inode fork pointer */
645 	struct xfs_bmbt_key	*kp;		/* root block key pointer */
646 	struct xfs_mount	*mp;		/* mount structure */
647 	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
648 	struct xfs_iext_cursor	icur;
649 	struct xfs_bmbt_irec	rec;
650 	xfs_extnum_t		cnt = 0;
651 
652 	mp = ip->i_mount;
653 	ASSERT(whichfork != XFS_COW_FORK);
654 	ifp = xfs_ifork_ptr(ip, whichfork);
655 	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
656 
657 	/*
658 	 * Make space in the inode incore. This needs to be undone if we fail
659 	 * to expand the root.
660 	 */
661 	xfs_iroot_realloc(ip, 1, whichfork);
662 
663 	/*
664 	 * Fill in the root.
665 	 */
666 	block = ifp->if_broot;
667 	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
668 	/*
669 	 * Need a cursor.  Can't allocate until bb_level is filled in.
670 	 */
671 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
672 	if (wasdel)
673 		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
674 	/*
675 	 * Convert to a btree with two levels, one record in root.
676 	 */
677 	ifp->if_format = XFS_DINODE_FMT_BTREE;
678 	memset(&args, 0, sizeof(args));
679 	args.tp = tp;
680 	args.mp = mp;
681 	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
682 
683 	args.minlen = args.maxlen = args.prod = 1;
684 	args.wasdel = wasdel;
685 	*logflagsp = 0;
686 	error = xfs_alloc_vextent_start_ag(&args,
687 				XFS_INO_TO_FSB(mp, ip->i_ino));
688 	if (error)
689 		goto out_root_realloc;
690 
691 	/*
692 	 * Allocation can't fail, the space was reserved.
693 	 */
694 	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
695 		error = -ENOSPC;
696 		goto out_root_realloc;
697 	}
698 
699 	cur->bc_bmap.allocated++;
700 	ip->i_nblocks++;
701 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
702 	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
703 			XFS_FSB_TO_DADDR(mp, args.fsbno),
704 			mp->m_bsize, 0, &abp);
705 	if (error)
706 		goto out_unreserve_dquot;
707 
708 	/*
709 	 * Fill in the child block.
710 	 */
711 	ablock = XFS_BUF_TO_BLOCK(abp);
712 	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);
713 
714 	for_each_xfs_iext(ifp, &icur, &rec) {
715 		if (isnullstartblock(rec.br_startblock))
716 			continue;
717 		arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt);
718 		xfs_bmbt_disk_set_all(arp, &rec);
719 		cnt++;
720 	}
721 	ASSERT(cnt == ifp->if_nextents);
722 	xfs_btree_set_numrecs(ablock, cnt);
723 
724 	/*
725 	 * Fill in the root key and pointer.
726 	 */
727 	kp = xfs_bmbt_key_addr(mp, block, 1);
728 	arp = xfs_bmbt_rec_addr(mp, ablock, 1);
729 	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
730 	pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
731 						be16_to_cpu(block->bb_level)));
732 	*pp = cpu_to_be64(args.fsbno);
733 
734 	/*
735 	 * Do all this logging at the end so that
736 	 * the root is at the right level.
737 	 */
738 	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
739 	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
740 	ASSERT(*curp == NULL);
741 	*curp = cur;
742 	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
743 	return 0;
744 
745 out_unreserve_dquot:
746 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
747 out_root_realloc:
748 	xfs_iroot_realloc(ip, -1, whichfork);
749 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
750 	ASSERT(ifp->if_broot == NULL);
751 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
752 
753 	return error;
754 }
755 
756 /*
757  * Convert a local file to an extents file.
758  * This code is out of bounds for data forks of regular files,
759  * since the file data needs to get logged so things will stay consistent.
760  * (The bmap-level manipulations are ok, though).
761  */
762 void
xfs_bmap_local_to_extents_empty(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork)763 xfs_bmap_local_to_extents_empty(
764 	struct xfs_trans	*tp,
765 	struct xfs_inode	*ip,
766 	int			whichfork)
767 {
768 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
769 
770 	ASSERT(whichfork != XFS_COW_FORK);
771 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
772 	ASSERT(ifp->if_bytes == 0);
773 	ASSERT(ifp->if_nextents == 0);
774 
775 	xfs_bmap_forkoff_reset(ip, whichfork);
776 	ifp->if_data = NULL;
777 	ifp->if_height = 0;
778 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
779 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
780 }
781 
782 
783 int					/* error */
xfs_bmap_local_to_extents(xfs_trans_t * tp,xfs_inode_t * ip,xfs_extlen_t total,int * logflagsp,int whichfork,void (* init_fn)(struct xfs_trans * tp,struct xfs_buf * bp,struct xfs_inode * ip,struct xfs_ifork * ifp,void * priv),void * priv)784 xfs_bmap_local_to_extents(
785 	xfs_trans_t	*tp,		/* transaction pointer */
786 	xfs_inode_t	*ip,		/* incore inode pointer */
787 	xfs_extlen_t	total,		/* total blocks needed by transaction */
788 	int		*logflagsp,	/* inode logging flags */
789 	int		whichfork,
790 	void		(*init_fn)(struct xfs_trans *tp,
791 				   struct xfs_buf *bp,
792 				   struct xfs_inode *ip,
793 				   struct xfs_ifork *ifp, void *priv),
794 	void		*priv)
795 {
796 	int		error = 0;
797 	int		flags;		/* logging flags returned */
798 	struct xfs_ifork *ifp;		/* inode fork pointer */
799 	xfs_alloc_arg_t	args;		/* allocation arguments */
800 	struct xfs_buf	*bp;		/* buffer for extent block */
801 	struct xfs_bmbt_irec rec;
802 	struct xfs_iext_cursor icur;
803 
804 	/*
805 	 * We don't want to deal with the case of keeping inode data inline yet.
806 	 * So sending the data fork of a regular inode is invalid.
807 	 */
808 	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
809 	ifp = xfs_ifork_ptr(ip, whichfork);
810 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
811 
812 	if (!ifp->if_bytes) {
813 		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
814 		flags = XFS_ILOG_CORE;
815 		goto done;
816 	}
817 
818 	flags = 0;
819 	error = 0;
820 	memset(&args, 0, sizeof(args));
821 	args.tp = tp;
822 	args.mp = ip->i_mount;
823 	args.total = total;
824 	args.minlen = args.maxlen = args.prod = 1;
825 	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
826 
827 	/*
828 	 * Allocate a block.  We know we need only one, since the
829 	 * file currently fits in an inode.
830 	 */
831 	args.total = total;
832 	args.minlen = args.maxlen = args.prod = 1;
833 	error = xfs_alloc_vextent_start_ag(&args,
834 			XFS_INO_TO_FSB(args.mp, ip->i_ino));
835 	if (error)
836 		goto done;
837 
838 	/* Can't fail, the space was reserved. */
839 	ASSERT(args.fsbno != NULLFSBLOCK);
840 	ASSERT(args.len == 1);
841 	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
842 			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
843 			args.mp->m_bsize, 0, &bp);
844 	if (error)
845 		goto done;
846 
847 	/*
848 	 * Initialize the block, copy the data and log the remote buffer.
849 	 *
850 	 * The callout is responsible for logging because the remote format
851 	 * might differ from the local format and thus we don't know how much to
852 	 * log here. Note that init_fn must also set the buffer log item type
853 	 * correctly.
854 	 */
855 	init_fn(tp, bp, ip, ifp, priv);
856 
857 	/* account for the change in fork size */
858 	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
859 	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
860 	flags |= XFS_ILOG_CORE;
861 
862 	ifp->if_data = NULL;
863 	ifp->if_height = 0;
864 
865 	rec.br_startoff = 0;
866 	rec.br_startblock = args.fsbno;
867 	rec.br_blockcount = 1;
868 	rec.br_state = XFS_EXT_NORM;
869 	xfs_iext_first(ifp, &icur);
870 	xfs_iext_insert(ip, &icur, &rec, 0);
871 
872 	ifp->if_nextents = 1;
873 	ip->i_nblocks = 1;
874 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
875 	flags |= xfs_ilog_fext(whichfork);
876 
877 done:
878 	*logflagsp = flags;
879 	return error;
880 }
881 
882 /*
883  * Called from xfs_bmap_add_attrfork to handle btree format files.
884  */
885 STATIC int					/* error */
xfs_bmap_add_attrfork_btree(xfs_trans_t * tp,xfs_inode_t * ip,int * flags)886 xfs_bmap_add_attrfork_btree(
887 	xfs_trans_t		*tp,		/* transaction pointer */
888 	xfs_inode_t		*ip,		/* incore inode pointer */
889 	int			*flags)		/* inode logging flags */
890 {
891 	struct xfs_btree_block	*block = ip->i_df.if_broot;
892 	struct xfs_btree_cur	*cur;		/* btree cursor */
893 	int			error;		/* error return value */
894 	xfs_mount_t		*mp;		/* file system mount struct */
895 	int			stat;		/* newroot status */
896 
897 	mp = ip->i_mount;
898 
899 	if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip))
900 		*flags |= XFS_ILOG_DBROOT;
901 	else {
902 		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
903 		error = xfs_bmbt_lookup_first(cur, &stat);
904 		if (error)
905 			goto error0;
906 		/* must be at least one entry */
907 		if (XFS_IS_CORRUPT(mp, stat != 1)) {
908 			xfs_btree_mark_sick(cur);
909 			error = -EFSCORRUPTED;
910 			goto error0;
911 		}
912 		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
913 			goto error0;
914 		if (stat == 0) {
915 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
916 			return -ENOSPC;
917 		}
918 		cur->bc_bmap.allocated = 0;
919 		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
920 	}
921 	return 0;
922 error0:
923 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
924 	return error;
925 }
926 
927 /*
928  * Called from xfs_bmap_add_attrfork to handle extents format files.
929  */
930 STATIC int					/* error */
xfs_bmap_add_attrfork_extents(struct xfs_trans * tp,struct xfs_inode * ip,int * flags)931 xfs_bmap_add_attrfork_extents(
932 	struct xfs_trans	*tp,		/* transaction pointer */
933 	struct xfs_inode	*ip,		/* incore inode pointer */
934 	int			*flags)		/* inode logging flags */
935 {
936 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
937 	int			error;		/* error return value */
938 
939 	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
940 	    xfs_inode_data_fork_size(ip))
941 		return 0;
942 	cur = NULL;
943 	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
944 					  XFS_DATA_FORK);
945 	if (cur) {
946 		cur->bc_bmap.allocated = 0;
947 		xfs_btree_del_cursor(cur, error);
948 	}
949 	return error;
950 }
951 
952 /*
953  * Called from xfs_bmap_add_attrfork to handle local format files. Each
954  * different data fork content type needs a different callout to do the
955  * conversion. Some are basic and only require special block initialisation
956  * callouts for the data formating, others (directories) are so specialised they
957  * handle everything themselves.
958  *
959  * XXX (dgc): investigate whether directory conversion can use the generic
960  * formatting callout. It should be possible - it's just a very complex
961  * formatter.
962  */
963 STATIC int					/* error */
xfs_bmap_add_attrfork_local(struct xfs_trans * tp,struct xfs_inode * ip,int * flags)964 xfs_bmap_add_attrfork_local(
965 	struct xfs_trans	*tp,		/* transaction pointer */
966 	struct xfs_inode	*ip,		/* incore inode pointer */
967 	int			*flags)		/* inode logging flags */
968 {
969 	struct xfs_da_args	dargs;		/* args for dir/attr code */
970 
971 	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
972 		return 0;
973 
974 	if (S_ISDIR(VFS_I(ip)->i_mode)) {
975 		memset(&dargs, 0, sizeof(dargs));
976 		dargs.geo = ip->i_mount->m_dir_geo;
977 		dargs.dp = ip;
978 		dargs.total = dargs.geo->fsbcount;
979 		dargs.whichfork = XFS_DATA_FORK;
980 		dargs.trans = tp;
981 		dargs.owner = ip->i_ino;
982 		return xfs_dir2_sf_to_block(&dargs);
983 	}
984 
985 	if (S_ISLNK(VFS_I(ip)->i_mode))
986 		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
987 				XFS_DATA_FORK, xfs_symlink_local_to_remote,
988 				NULL);
989 
990 	/* should only be called for types that support local format data */
991 	ASSERT(0);
992 	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
993 	return -EFSCORRUPTED;
994 }
995 
996 /*
997  * Set an inode attr fork offset based on the format of the data fork.
998  */
999 static int
xfs_bmap_set_attrforkoff(struct xfs_inode * ip,int size,int * version)1000 xfs_bmap_set_attrforkoff(
1001 	struct xfs_inode	*ip,
1002 	int			size,
1003 	int			*version)
1004 {
1005 	int			default_size = xfs_default_attroffset(ip) >> 3;
1006 
1007 	switch (ip->i_df.if_format) {
1008 	case XFS_DINODE_FMT_DEV:
1009 		ip->i_forkoff = default_size;
1010 		break;
1011 	case XFS_DINODE_FMT_LOCAL:
1012 	case XFS_DINODE_FMT_EXTENTS:
1013 	case XFS_DINODE_FMT_BTREE:
1014 		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1015 		if (!ip->i_forkoff)
1016 			ip->i_forkoff = default_size;
1017 		else if (xfs_has_attr2(ip->i_mount) && version)
1018 			*version = 2;
1019 		break;
1020 	default:
1021 		ASSERT(0);
1022 		return -EINVAL;
1023 	}
1024 
1025 	return 0;
1026 }
1027 
1028 /*
1029  * Convert inode from non-attributed to attributed.  Caller must hold the
1030  * ILOCK_EXCL and the file cannot have an attr fork.
1031  */
1032 int						/* error code */
xfs_bmap_add_attrfork(struct xfs_trans * tp,struct xfs_inode * ip,int size,int rsvd)1033 xfs_bmap_add_attrfork(
1034 	struct xfs_trans	*tp,
1035 	struct xfs_inode	*ip,		/* incore inode pointer */
1036 	int			size,		/* space new attribute needs */
1037 	int			rsvd)		/* xact may use reserved blks */
1038 {
1039 	struct xfs_mount	*mp = tp->t_mountp;
1040 	int			version = 1;	/* superblock attr version */
1041 	int			logflags;	/* logging flags */
1042 	int			error;		/* error return value */
1043 
1044 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1045 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1046 	ASSERT(!xfs_inode_has_attr_fork(ip));
1047 
1048 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1049 	error = xfs_bmap_set_attrforkoff(ip, size, &version);
1050 	if (error)
1051 		return error;
1052 
1053 	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
1054 	logflags = 0;
1055 	switch (ip->i_df.if_format) {
1056 	case XFS_DINODE_FMT_LOCAL:
1057 		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
1058 		break;
1059 	case XFS_DINODE_FMT_EXTENTS:
1060 		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
1061 		break;
1062 	case XFS_DINODE_FMT_BTREE:
1063 		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
1064 		break;
1065 	default:
1066 		error = 0;
1067 		break;
1068 	}
1069 	if (logflags)
1070 		xfs_trans_log_inode(tp, ip, logflags);
1071 	if (error)
1072 		return error;
1073 	if (!xfs_has_attr(mp) ||
1074 	   (!xfs_has_attr2(mp) && version == 2)) {
1075 		bool log_sb = false;
1076 
1077 		spin_lock(&mp->m_sb_lock);
1078 		if (!xfs_has_attr(mp)) {
1079 			xfs_add_attr(mp);
1080 			log_sb = true;
1081 		}
1082 		if (!xfs_has_attr2(mp) && version == 2) {
1083 			xfs_add_attr2(mp);
1084 			log_sb = true;
1085 		}
1086 		spin_unlock(&mp->m_sb_lock);
1087 		if (log_sb)
1088 			xfs_log_sb(tp);
1089 	}
1090 
1091 	return 0;
1092 }
1093 
1094 /*
1095  * Internal and external extent tree search functions.
1096  */
1097 
1098 struct xfs_iread_state {
1099 	struct xfs_iext_cursor	icur;
1100 	xfs_extnum_t		loaded;
1101 };
1102 
1103 int
xfs_bmap_complain_bad_rec(struct xfs_inode * ip,int whichfork,xfs_failaddr_t fa,const struct xfs_bmbt_irec * irec)1104 xfs_bmap_complain_bad_rec(
1105 	struct xfs_inode		*ip,
1106 	int				whichfork,
1107 	xfs_failaddr_t			fa,
1108 	const struct xfs_bmbt_irec	*irec)
1109 {
1110 	struct xfs_mount		*mp = ip->i_mount;
1111 	const char			*forkname;
1112 
1113 	switch (whichfork) {
1114 	case XFS_DATA_FORK:	forkname = "data"; break;
1115 	case XFS_ATTR_FORK:	forkname = "attr"; break;
1116 	case XFS_COW_FORK:	forkname = "CoW"; break;
1117 	default:		forkname = "???"; break;
1118 	}
1119 
1120 	xfs_warn(mp,
1121  "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
1122 				ip->i_ino, forkname, fa);
1123 	xfs_warn(mp,
1124 		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
1125 		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
1126 		irec->br_state);
1127 
1128 	return -EFSCORRUPTED;
1129 }
1130 
1131 /* Stuff every bmbt record from this block into the incore extent map. */
1132 static int
xfs_iread_bmbt_block(struct xfs_btree_cur * cur,int level,void * priv)1133 xfs_iread_bmbt_block(
1134 	struct xfs_btree_cur	*cur,
1135 	int			level,
1136 	void			*priv)
1137 {
1138 	struct xfs_iread_state	*ir = priv;
1139 	struct xfs_mount	*mp = cur->bc_mp;
1140 	struct xfs_inode	*ip = cur->bc_ino.ip;
1141 	struct xfs_btree_block	*block;
1142 	struct xfs_buf		*bp;
1143 	struct xfs_bmbt_rec	*frp;
1144 	xfs_extnum_t		num_recs;
1145 	xfs_extnum_t		j;
1146 	int			whichfork = cur->bc_ino.whichfork;
1147 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1148 
1149 	block = xfs_btree_get_block(cur, level, &bp);
1150 
1151 	/* Abort if we find more records than nextents. */
1152 	num_recs = xfs_btree_get_numrecs(block);
1153 	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
1154 		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
1155 				(unsigned long long)ip->i_ino);
1156 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
1157 				sizeof(*block), __this_address);
1158 		xfs_bmap_mark_sick(ip, whichfork);
1159 		return -EFSCORRUPTED;
1160 	}
1161 
1162 	/* Copy records into the incore cache. */
1163 	frp = xfs_bmbt_rec_addr(mp, block, 1);
1164 	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
1165 		struct xfs_bmbt_irec	new;
1166 		xfs_failaddr_t		fa;
1167 
1168 		xfs_bmbt_disk_get_all(frp, &new);
1169 		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1170 		if (fa) {
1171 			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1172 					"xfs_iread_extents(2)", frp,
1173 					sizeof(*frp), fa);
1174 			xfs_bmap_mark_sick(ip, whichfork);
1175 			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
1176 					&new);
1177 		}
1178 		xfs_iext_insert(ip, &ir->icur, &new,
1179 				xfs_bmap_fork_to_state(whichfork));
1180 		trace_xfs_read_extent(ip, &ir->icur,
1181 				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
1182 		xfs_iext_next(ifp, &ir->icur);
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 /*
1189  * Read in extents from a btree-format inode.
1190  */
1191 int
xfs_iread_extents(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork)1192 xfs_iread_extents(
1193 	struct xfs_trans	*tp,
1194 	struct xfs_inode	*ip,
1195 	int			whichfork)
1196 {
1197 	struct xfs_iread_state	ir;
1198 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1199 	struct xfs_mount	*mp = ip->i_mount;
1200 	struct xfs_btree_cur	*cur;
1201 	int			error;
1202 
1203 	if (!xfs_need_iread_extents(ifp))
1204 		return 0;
1205 
1206 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1207 
1208 	ir.loaded = 0;
1209 	xfs_iext_first(ifp, &ir.icur);
1210 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
1211 	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
1212 			XFS_BTREE_VISIT_RECORDS, &ir);
1213 	xfs_btree_del_cursor(cur, error);
1214 	if (error)
1215 		goto out;
1216 
1217 	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
1218 		xfs_bmap_mark_sick(ip, whichfork);
1219 		error = -EFSCORRUPTED;
1220 		goto out;
1221 	}
1222 	ASSERT(ir.loaded == xfs_iext_count(ifp));
1223 	/*
1224 	 * Use release semantics so that we can use acquire semantics in
1225 	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
1226 	 * after that load.
1227 	 */
1228 	smp_store_release(&ifp->if_needextents, 0);
1229 	return 0;
1230 out:
1231 	if (xfs_metadata_is_sick(error))
1232 		xfs_bmap_mark_sick(ip, whichfork);
1233 	xfs_iext_destroy(ifp);
1234 	return error;
1235 }
1236 
1237 /*
1238  * Returns the relative block number of the first unused block(s) in the given
1239  * fork with at least "len" logically contiguous blocks free.  This is the
1240  * lowest-address hole if the fork has holes, else the first block past the end
1241  * of fork.  Return 0 if the fork is currently local (in-inode).
1242  */
1243 int						/* error */
xfs_bmap_first_unused(struct xfs_trans * tp,struct xfs_inode * ip,xfs_extlen_t len,xfs_fileoff_t * first_unused,int whichfork)1244 xfs_bmap_first_unused(
1245 	struct xfs_trans	*tp,		/* transaction pointer */
1246 	struct xfs_inode	*ip,		/* incore inode */
1247 	xfs_extlen_t		len,		/* size of hole to find */
1248 	xfs_fileoff_t		*first_unused,	/* unused block */
1249 	int			whichfork)	/* data or attr fork */
1250 {
1251 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1252 	struct xfs_bmbt_irec	got;
1253 	struct xfs_iext_cursor	icur;
1254 	xfs_fileoff_t		lastaddr = 0;
1255 	xfs_fileoff_t		lowest, max;
1256 	int			error;
1257 
1258 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
1259 		*first_unused = 0;
1260 		return 0;
1261 	}
1262 
1263 	ASSERT(xfs_ifork_has_extents(ifp));
1264 
1265 	error = xfs_iread_extents(tp, ip, whichfork);
1266 	if (error)
1267 		return error;
1268 
1269 	lowest = max = *first_unused;
1270 	for_each_xfs_iext(ifp, &icur, &got) {
1271 		/*
1272 		 * See if the hole before this extent will work.
1273 		 */
1274 		if (got.br_startoff >= lowest + len &&
1275 		    got.br_startoff - max >= len)
1276 			break;
1277 		lastaddr = got.br_startoff + got.br_blockcount;
1278 		max = XFS_FILEOFF_MAX(lastaddr, lowest);
1279 	}
1280 
1281 	*first_unused = max;
1282 	return 0;
1283 }
1284 
1285 /*
1286  * Returns the file-relative block number of the last block - 1 before
1287  * last_block (input value) in the file.
1288  * This is not based on i_size, it is based on the extent records.
1289  * Returns 0 for local files, as they do not have extent records.
1290  */
1291 int						/* error */
xfs_bmap_last_before(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t * last_block,int whichfork)1292 xfs_bmap_last_before(
1293 	struct xfs_trans	*tp,		/* transaction pointer */
1294 	struct xfs_inode	*ip,		/* incore inode */
1295 	xfs_fileoff_t		*last_block,	/* last block */
1296 	int			whichfork)	/* data or attr fork */
1297 {
1298 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1299 	struct xfs_bmbt_irec	got;
1300 	struct xfs_iext_cursor	icur;
1301 	int			error;
1302 
1303 	switch (ifp->if_format) {
1304 	case XFS_DINODE_FMT_LOCAL:
1305 		*last_block = 0;
1306 		return 0;
1307 	case XFS_DINODE_FMT_BTREE:
1308 	case XFS_DINODE_FMT_EXTENTS:
1309 		break;
1310 	default:
1311 		ASSERT(0);
1312 		xfs_bmap_mark_sick(ip, whichfork);
1313 		return -EFSCORRUPTED;
1314 	}
1315 
1316 	error = xfs_iread_extents(tp, ip, whichfork);
1317 	if (error)
1318 		return error;
1319 
1320 	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1321 		*last_block = 0;
1322 	return 0;
1323 }
1324 
1325 int
xfs_bmap_last_extent(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,struct xfs_bmbt_irec * rec,int * is_empty)1326 xfs_bmap_last_extent(
1327 	struct xfs_trans	*tp,
1328 	struct xfs_inode	*ip,
1329 	int			whichfork,
1330 	struct xfs_bmbt_irec	*rec,
1331 	int			*is_empty)
1332 {
1333 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1334 	struct xfs_iext_cursor	icur;
1335 	int			error;
1336 
1337 	error = xfs_iread_extents(tp, ip, whichfork);
1338 	if (error)
1339 		return error;
1340 
1341 	xfs_iext_last(ifp, &icur);
1342 	if (!xfs_iext_get_extent(ifp, &icur, rec))
1343 		*is_empty = 1;
1344 	else
1345 		*is_empty = 0;
1346 	return 0;
1347 }
1348 
1349 /*
1350  * Check the last inode extent to determine whether this allocation will result
1351  * in blocks being allocated at the end of the file. When we allocate new data
1352  * blocks at the end of the file which do not start at the previous data block,
1353  * we will try to align the new blocks at stripe unit boundaries.
1354  *
1355  * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1356  * at, or past the EOF.
1357  */
1358 STATIC int
xfs_bmap_isaeof(struct xfs_bmalloca * bma,int whichfork)1359 xfs_bmap_isaeof(
1360 	struct xfs_bmalloca	*bma,
1361 	int			whichfork)
1362 {
1363 	struct xfs_bmbt_irec	rec;
1364 	int			is_empty;
1365 	int			error;
1366 
1367 	bma->aeof = false;
1368 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1369 				     &is_empty);
1370 	if (error)
1371 		return error;
1372 
1373 	if (is_empty) {
1374 		bma->aeof = true;
1375 		return 0;
1376 	}
1377 
1378 	/*
1379 	 * Check if we are allocation or past the last extent, or at least into
1380 	 * the last delayed allocated extent.
1381 	 */
1382 	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1383 		(bma->offset >= rec.br_startoff &&
1384 		 isnullstartblock(rec.br_startblock));
1385 	return 0;
1386 }
1387 
1388 /*
1389  * Returns the file-relative block number of the first block past eof in
1390  * the file.  This is not based on i_size, it is based on the extent records.
1391  * Returns 0 for local files, as they do not have extent records.
1392  */
1393 int
xfs_bmap_last_offset(struct xfs_inode * ip,xfs_fileoff_t * last_block,int whichfork)1394 xfs_bmap_last_offset(
1395 	struct xfs_inode	*ip,
1396 	xfs_fileoff_t		*last_block,
1397 	int			whichfork)
1398 {
1399 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1400 	struct xfs_bmbt_irec	rec;
1401 	int			is_empty;
1402 	int			error;
1403 
1404 	*last_block = 0;
1405 
1406 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
1407 		return 0;
1408 
1409 	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
1410 		xfs_bmap_mark_sick(ip, whichfork);
1411 		return -EFSCORRUPTED;
1412 	}
1413 
1414 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1415 	if (error || is_empty)
1416 		return error;
1417 
1418 	*last_block = rec.br_startoff + rec.br_blockcount;
1419 	return 0;
1420 }
1421 
1422 /*
1423  * Extent tree manipulation functions used during allocation.
1424  */
1425 
1426 /*
1427  * Convert a delayed allocation to a real allocation.
1428  */
1429 STATIC int				/* error */
xfs_bmap_add_extent_delay_real(struct xfs_bmalloca * bma,int whichfork)1430 xfs_bmap_add_extent_delay_real(
1431 	struct xfs_bmalloca	*bma,
1432 	int			whichfork)
1433 {
1434 	struct xfs_mount	*mp = bma->ip->i_mount;
1435 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
1436 	struct xfs_bmbt_irec	*new = &bma->got;
1437 	int			error;	/* error return value */
1438 	int			i;	/* temp state */
1439 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
1440 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
1441 					/* left is 0, right is 1, prev is 2 */
1442 	int			rval=0;	/* return value (logging flags) */
1443 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
1444 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
1445 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
1446 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
1447 	int			tmp_rval;	/* partial logging flags */
1448 	struct xfs_bmbt_irec	old;
1449 
1450 	ASSERT(whichfork != XFS_ATTR_FORK);
1451 	ASSERT(!isnullstartblock(new->br_startblock));
1452 	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
1453 
1454 	XFS_STATS_INC(mp, xs_add_exlist);
1455 
1456 #define	LEFT		r[0]
1457 #define	RIGHT		r[1]
1458 #define	PREV		r[2]
1459 
1460 	/*
1461 	 * Set up a bunch of variables to make the tests simpler.
1462 	 */
1463 	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1464 	new_endoff = new->br_startoff + new->br_blockcount;
1465 	ASSERT(isnullstartblock(PREV.br_startblock));
1466 	ASSERT(PREV.br_startoff <= new->br_startoff);
1467 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1468 
1469 	da_old = startblockval(PREV.br_startblock);
1470 	da_new = 0;
1471 
1472 	/*
1473 	 * Set flags determining what part of the previous delayed allocation
1474 	 * extent is being replaced by a real allocation.
1475 	 */
1476 	if (PREV.br_startoff == new->br_startoff)
1477 		state |= BMAP_LEFT_FILLING;
1478 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1479 		state |= BMAP_RIGHT_FILLING;
1480 
1481 	/*
1482 	 * Check and set flags if this segment has a left neighbor.
1483 	 * Don't set contiguous if the combined extent would be too large.
1484 	 */
1485 	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1486 		state |= BMAP_LEFT_VALID;
1487 		if (isnullstartblock(LEFT.br_startblock))
1488 			state |= BMAP_LEFT_DELAY;
1489 	}
1490 
1491 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1492 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1493 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1494 	    LEFT.br_state == new->br_state &&
1495 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
1496 		state |= BMAP_LEFT_CONTIG;
1497 
1498 	/*
1499 	 * Check and set flags if this segment has a right neighbor.
1500 	 * Don't set contiguous if the combined extent would be too large.
1501 	 * Also check for all-three-contiguous being too large.
1502 	 */
1503 	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1504 		state |= BMAP_RIGHT_VALID;
1505 		if (isnullstartblock(RIGHT.br_startblock))
1506 			state |= BMAP_RIGHT_DELAY;
1507 	}
1508 
1509 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1510 	    new_endoff == RIGHT.br_startoff &&
1511 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1512 	    new->br_state == RIGHT.br_state &&
1513 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1514 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1515 		       BMAP_RIGHT_FILLING)) !=
1516 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1517 		       BMAP_RIGHT_FILLING) ||
1518 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1519 			<= XFS_MAX_BMBT_EXTLEN))
1520 		state |= BMAP_RIGHT_CONTIG;
1521 
1522 	error = 0;
1523 	/*
1524 	 * Switch out based on the FILLING and CONTIG state bits.
1525 	 */
1526 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1527 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1528 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1529 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1530 		/*
1531 		 * Filling in all of a previously delayed allocation extent.
1532 		 * The left and right neighbors are both contiguous with new.
1533 		 */
1534 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1535 
1536 		xfs_iext_remove(bma->ip, &bma->icur, state);
1537 		xfs_iext_remove(bma->ip, &bma->icur, state);
1538 		xfs_iext_prev(ifp, &bma->icur);
1539 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1540 		ifp->if_nextents--;
1541 
1542 		if (bma->cur == NULL)
1543 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1544 		else {
1545 			rval = XFS_ILOG_CORE;
1546 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1547 			if (error)
1548 				goto done;
1549 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1550 				xfs_btree_mark_sick(bma->cur);
1551 				error = -EFSCORRUPTED;
1552 				goto done;
1553 			}
1554 			error = xfs_btree_delete(bma->cur, &i);
1555 			if (error)
1556 				goto done;
1557 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1558 				xfs_btree_mark_sick(bma->cur);
1559 				error = -EFSCORRUPTED;
1560 				goto done;
1561 			}
1562 			error = xfs_btree_decrement(bma->cur, 0, &i);
1563 			if (error)
1564 				goto done;
1565 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1566 				xfs_btree_mark_sick(bma->cur);
1567 				error = -EFSCORRUPTED;
1568 				goto done;
1569 			}
1570 			error = xfs_bmbt_update(bma->cur, &LEFT);
1571 			if (error)
1572 				goto done;
1573 		}
1574 		ASSERT(da_new <= da_old);
1575 		break;
1576 
1577 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1578 		/*
1579 		 * Filling in all of a previously delayed allocation extent.
1580 		 * The left neighbor is contiguous, the right is not.
1581 		 */
1582 		old = LEFT;
1583 		LEFT.br_blockcount += PREV.br_blockcount;
1584 
1585 		xfs_iext_remove(bma->ip, &bma->icur, state);
1586 		xfs_iext_prev(ifp, &bma->icur);
1587 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1588 
1589 		if (bma->cur == NULL)
1590 			rval = XFS_ILOG_DEXT;
1591 		else {
1592 			rval = 0;
1593 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1594 			if (error)
1595 				goto done;
1596 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1597 				xfs_btree_mark_sick(bma->cur);
1598 				error = -EFSCORRUPTED;
1599 				goto done;
1600 			}
1601 			error = xfs_bmbt_update(bma->cur, &LEFT);
1602 			if (error)
1603 				goto done;
1604 		}
1605 		ASSERT(da_new <= da_old);
1606 		break;
1607 
1608 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1609 		/*
1610 		 * Filling in all of a previously delayed allocation extent.
1611 		 * The right neighbor is contiguous, the left is not. Take care
1612 		 * with delay -> unwritten extent allocation here because the
1613 		 * delalloc record we are overwriting is always written.
1614 		 */
1615 		PREV.br_startblock = new->br_startblock;
1616 		PREV.br_blockcount += RIGHT.br_blockcount;
1617 		PREV.br_state = new->br_state;
1618 
1619 		xfs_iext_next(ifp, &bma->icur);
1620 		xfs_iext_remove(bma->ip, &bma->icur, state);
1621 		xfs_iext_prev(ifp, &bma->icur);
1622 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1623 
1624 		if (bma->cur == NULL)
1625 			rval = XFS_ILOG_DEXT;
1626 		else {
1627 			rval = 0;
1628 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1629 			if (error)
1630 				goto done;
1631 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1632 				xfs_btree_mark_sick(bma->cur);
1633 				error = -EFSCORRUPTED;
1634 				goto done;
1635 			}
1636 			error = xfs_bmbt_update(bma->cur, &PREV);
1637 			if (error)
1638 				goto done;
1639 		}
1640 		ASSERT(da_new <= da_old);
1641 		break;
1642 
1643 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1644 		/*
1645 		 * Filling in all of a previously delayed allocation extent.
1646 		 * Neither the left nor right neighbors are contiguous with
1647 		 * the new one.
1648 		 */
1649 		PREV.br_startblock = new->br_startblock;
1650 		PREV.br_state = new->br_state;
1651 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1652 		ifp->if_nextents++;
1653 
1654 		if (bma->cur == NULL)
1655 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1656 		else {
1657 			rval = XFS_ILOG_CORE;
1658 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1659 			if (error)
1660 				goto done;
1661 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1662 				xfs_btree_mark_sick(bma->cur);
1663 				error = -EFSCORRUPTED;
1664 				goto done;
1665 			}
1666 			error = xfs_btree_insert(bma->cur, &i);
1667 			if (error)
1668 				goto done;
1669 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1670 				xfs_btree_mark_sick(bma->cur);
1671 				error = -EFSCORRUPTED;
1672 				goto done;
1673 			}
1674 		}
1675 		ASSERT(da_new <= da_old);
1676 		break;
1677 
1678 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1679 		/*
1680 		 * Filling in the first part of a previous delayed allocation.
1681 		 * The left neighbor is contiguous.
1682 		 */
1683 		old = LEFT;
1684 		temp = PREV.br_blockcount - new->br_blockcount;
1685 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1686 				startblockval(PREV.br_startblock));
1687 
1688 		LEFT.br_blockcount += new->br_blockcount;
1689 
1690 		PREV.br_blockcount = temp;
1691 		PREV.br_startoff += new->br_blockcount;
1692 		PREV.br_startblock = nullstartblock(da_new);
1693 
1694 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1695 		xfs_iext_prev(ifp, &bma->icur);
1696 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1697 
1698 		if (bma->cur == NULL)
1699 			rval = XFS_ILOG_DEXT;
1700 		else {
1701 			rval = 0;
1702 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1703 			if (error)
1704 				goto done;
1705 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1706 				xfs_btree_mark_sick(bma->cur);
1707 				error = -EFSCORRUPTED;
1708 				goto done;
1709 			}
1710 			error = xfs_bmbt_update(bma->cur, &LEFT);
1711 			if (error)
1712 				goto done;
1713 		}
1714 		ASSERT(da_new <= da_old);
1715 		break;
1716 
1717 	case BMAP_LEFT_FILLING:
1718 		/*
1719 		 * Filling in the first part of a previous delayed allocation.
1720 		 * The left neighbor is not contiguous.
1721 		 */
1722 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1723 		ifp->if_nextents++;
1724 
1725 		if (bma->cur == NULL)
1726 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1727 		else {
1728 			rval = XFS_ILOG_CORE;
1729 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1730 			if (error)
1731 				goto done;
1732 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1733 				xfs_btree_mark_sick(bma->cur);
1734 				error = -EFSCORRUPTED;
1735 				goto done;
1736 			}
1737 			error = xfs_btree_insert(bma->cur, &i);
1738 			if (error)
1739 				goto done;
1740 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1741 				xfs_btree_mark_sick(bma->cur);
1742 				error = -EFSCORRUPTED;
1743 				goto done;
1744 			}
1745 		}
1746 
1747 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1748 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1749 					&bma->cur, 1, &tmp_rval, whichfork);
1750 			rval |= tmp_rval;
1751 			if (error)
1752 				goto done;
1753 		}
1754 
1755 		temp = PREV.br_blockcount - new->br_blockcount;
1756 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1757 			startblockval(PREV.br_startblock) -
1758 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1759 
1760 		PREV.br_startoff = new_endoff;
1761 		PREV.br_blockcount = temp;
1762 		PREV.br_startblock = nullstartblock(da_new);
1763 		xfs_iext_next(ifp, &bma->icur);
1764 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1765 		xfs_iext_prev(ifp, &bma->icur);
1766 		break;
1767 
1768 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1769 		/*
1770 		 * Filling in the last part of a previous delayed allocation.
1771 		 * The right neighbor is contiguous with the new allocation.
1772 		 */
1773 		old = RIGHT;
1774 		RIGHT.br_startoff = new->br_startoff;
1775 		RIGHT.br_startblock = new->br_startblock;
1776 		RIGHT.br_blockcount += new->br_blockcount;
1777 
1778 		if (bma->cur == NULL)
1779 			rval = XFS_ILOG_DEXT;
1780 		else {
1781 			rval = 0;
1782 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1783 			if (error)
1784 				goto done;
1785 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1786 				xfs_btree_mark_sick(bma->cur);
1787 				error = -EFSCORRUPTED;
1788 				goto done;
1789 			}
1790 			error = xfs_bmbt_update(bma->cur, &RIGHT);
1791 			if (error)
1792 				goto done;
1793 		}
1794 
1795 		temp = PREV.br_blockcount - new->br_blockcount;
1796 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1797 			startblockval(PREV.br_startblock));
1798 
1799 		PREV.br_blockcount = temp;
1800 		PREV.br_startblock = nullstartblock(da_new);
1801 
1802 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1803 		xfs_iext_next(ifp, &bma->icur);
1804 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1805 		ASSERT(da_new <= da_old);
1806 		break;
1807 
1808 	case BMAP_RIGHT_FILLING:
1809 		/*
1810 		 * Filling in the last part of a previous delayed allocation.
1811 		 * The right neighbor is not contiguous.
1812 		 */
1813 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1814 		ifp->if_nextents++;
1815 
1816 		if (bma->cur == NULL)
1817 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1818 		else {
1819 			rval = XFS_ILOG_CORE;
1820 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1821 			if (error)
1822 				goto done;
1823 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1824 				xfs_btree_mark_sick(bma->cur);
1825 				error = -EFSCORRUPTED;
1826 				goto done;
1827 			}
1828 			error = xfs_btree_insert(bma->cur, &i);
1829 			if (error)
1830 				goto done;
1831 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1832 				xfs_btree_mark_sick(bma->cur);
1833 				error = -EFSCORRUPTED;
1834 				goto done;
1835 			}
1836 		}
1837 
1838 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1839 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1840 				&bma->cur, 1, &tmp_rval, whichfork);
1841 			rval |= tmp_rval;
1842 			if (error)
1843 				goto done;
1844 		}
1845 
1846 		temp = PREV.br_blockcount - new->br_blockcount;
1847 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1848 			startblockval(PREV.br_startblock) -
1849 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1850 
1851 		PREV.br_startblock = nullstartblock(da_new);
1852 		PREV.br_blockcount = temp;
1853 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1854 		xfs_iext_next(ifp, &bma->icur);
1855 		ASSERT(da_new <= da_old);
1856 		break;
1857 
1858 	case 0:
1859 		/*
1860 		 * Filling in the middle part of a previous delayed allocation.
1861 		 * Contiguity is impossible here.
1862 		 * This case is avoided almost all the time.
1863 		 *
1864 		 * We start with a delayed allocation:
1865 		 *
1866 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1867 		 *  PREV @ idx
1868 		 *
1869 	         * and we are allocating:
1870 		 *                     +rrrrrrrrrrrrrrrrr+
1871 		 *			      new
1872 		 *
1873 		 * and we set it up for insertion as:
1874 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1875 		 *                            new
1876 		 *  PREV @ idx          LEFT              RIGHT
1877 		 *                      inserted at idx + 1
1878 		 */
1879 		old = PREV;
1880 
1881 		/* LEFT is the new middle */
1882 		LEFT = *new;
1883 
1884 		/* RIGHT is the new right */
1885 		RIGHT.br_state = PREV.br_state;
1886 		RIGHT.br_startoff = new_endoff;
1887 		RIGHT.br_blockcount =
1888 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
1889 		RIGHT.br_startblock =
1890 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1891 					RIGHT.br_blockcount));
1892 
1893 		/* truncate PREV */
1894 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1895 		PREV.br_startblock =
1896 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1897 					PREV.br_blockcount));
1898 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1899 
1900 		xfs_iext_next(ifp, &bma->icur);
1901 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1902 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1903 		ifp->if_nextents++;
1904 
1905 		if (bma->cur == NULL)
1906 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1907 		else {
1908 			rval = XFS_ILOG_CORE;
1909 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1910 			if (error)
1911 				goto done;
1912 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1913 				xfs_btree_mark_sick(bma->cur);
1914 				error = -EFSCORRUPTED;
1915 				goto done;
1916 			}
1917 			error = xfs_btree_insert(bma->cur, &i);
1918 			if (error)
1919 				goto done;
1920 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1921 				xfs_btree_mark_sick(bma->cur);
1922 				error = -EFSCORRUPTED;
1923 				goto done;
1924 			}
1925 		}
1926 
1927 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1928 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1929 					&bma->cur, 1, &tmp_rval, whichfork);
1930 			rval |= tmp_rval;
1931 			if (error)
1932 				goto done;
1933 		}
1934 
1935 		da_new = startblockval(PREV.br_startblock) +
1936 			 startblockval(RIGHT.br_startblock);
1937 		break;
1938 
1939 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1940 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1941 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1942 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1943 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1944 	case BMAP_LEFT_CONTIG:
1945 	case BMAP_RIGHT_CONTIG:
1946 		/*
1947 		 * These cases are all impossible.
1948 		 */
1949 		ASSERT(0);
1950 	}
1951 
1952 	/* add reverse mapping unless caller opted out */
1953 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1954 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1955 
1956 	/* convert to a btree if necessary */
1957 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1958 		int	tmp_logflags;	/* partial log flag return val */
1959 
1960 		ASSERT(bma->cur == NULL);
1961 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1962 				&bma->cur, da_old > 0, &tmp_logflags,
1963 				whichfork);
1964 		bma->logflags |= tmp_logflags;
1965 		if (error)
1966 			goto done;
1967 	}
1968 
1969 	if (da_new != da_old)
1970 		xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);
1971 
1972 	if (bma->cur) {
1973 		da_new += bma->cur->bc_bmap.allocated;
1974 		bma->cur->bc_bmap.allocated = 0;
1975 	}
1976 
1977 	/* adjust for changes in reserved delayed indirect blocks */
1978 	if (da_new < da_old)
1979 		xfs_add_fdblocks(mp, da_old - da_new);
1980 	else if (da_new > da_old)
1981 		error = xfs_dec_fdblocks(mp, da_new - da_old, true);
1982 
1983 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1984 done:
1985 	if (whichfork != XFS_COW_FORK)
1986 		bma->logflags |= rval;
1987 	return error;
1988 #undef	LEFT
1989 #undef	RIGHT
1990 #undef	PREV
1991 }
1992 
1993 /*
1994  * Convert an unwritten allocation to a real allocation or vice versa.
1995  */
1996 int					/* error */
xfs_bmap_add_extent_unwritten_real(struct xfs_trans * tp,xfs_inode_t * ip,int whichfork,struct xfs_iext_cursor * icur,struct xfs_btree_cur ** curp,xfs_bmbt_irec_t * new,int * logflagsp)1997 xfs_bmap_add_extent_unwritten_real(
1998 	struct xfs_trans	*tp,
1999 	xfs_inode_t		*ip,	/* incore inode pointer */
2000 	int			whichfork,
2001 	struct xfs_iext_cursor	*icur,
2002 	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
2003 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
2004 	int			*logflagsp) /* inode logging flags */
2005 {
2006 	struct xfs_btree_cur	*cur;	/* btree cursor */
2007 	int			error;	/* error return value */
2008 	int			i;	/* temp state */
2009 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2010 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
2011 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
2012 					/* left is 0, right is 1, prev is 2 */
2013 	int			rval=0;	/* return value (logging flags) */
2014 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2015 	struct xfs_mount	*mp = ip->i_mount;
2016 	struct xfs_bmbt_irec	old;
2017 
2018 	*logflagsp = 0;
2019 
2020 	cur = *curp;
2021 	ifp = xfs_ifork_ptr(ip, whichfork);
2022 
2023 	ASSERT(!isnullstartblock(new->br_startblock));
2024 
2025 	XFS_STATS_INC(mp, xs_add_exlist);
2026 
2027 #define	LEFT		r[0]
2028 #define	RIGHT		r[1]
2029 #define	PREV		r[2]
2030 
2031 	/*
2032 	 * Set up a bunch of variables to make the tests simpler.
2033 	 */
2034 	error = 0;
2035 	xfs_iext_get_extent(ifp, icur, &PREV);
2036 	ASSERT(new->br_state != PREV.br_state);
2037 	new_endoff = new->br_startoff + new->br_blockcount;
2038 	ASSERT(PREV.br_startoff <= new->br_startoff);
2039 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2040 
2041 	/*
2042 	 * Set flags determining what part of the previous oldext allocation
2043 	 * extent is being replaced by a newext allocation.
2044 	 */
2045 	if (PREV.br_startoff == new->br_startoff)
2046 		state |= BMAP_LEFT_FILLING;
2047 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2048 		state |= BMAP_RIGHT_FILLING;
2049 
2050 	/*
2051 	 * Check and set flags if this segment has a left neighbor.
2052 	 * Don't set contiguous if the combined extent would be too large.
2053 	 */
2054 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2055 		state |= BMAP_LEFT_VALID;
2056 		if (isnullstartblock(LEFT.br_startblock))
2057 			state |= BMAP_LEFT_DELAY;
2058 	}
2059 
2060 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2061 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2062 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2063 	    LEFT.br_state == new->br_state &&
2064 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2065 		state |= BMAP_LEFT_CONTIG;
2066 
2067 	/*
2068 	 * Check and set flags if this segment has a right neighbor.
2069 	 * Don't set contiguous if the combined extent would be too large.
2070 	 * Also check for all-three-contiguous being too large.
2071 	 */
2072 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2073 		state |= BMAP_RIGHT_VALID;
2074 		if (isnullstartblock(RIGHT.br_startblock))
2075 			state |= BMAP_RIGHT_DELAY;
2076 	}
2077 
2078 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2079 	    new_endoff == RIGHT.br_startoff &&
2080 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2081 	    new->br_state == RIGHT.br_state &&
2082 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2083 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2084 		       BMAP_RIGHT_FILLING)) !=
2085 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2086 		       BMAP_RIGHT_FILLING) ||
2087 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2088 			<= XFS_MAX_BMBT_EXTLEN))
2089 		state |= BMAP_RIGHT_CONTIG;
2090 
2091 	/*
2092 	 * Switch out based on the FILLING and CONTIG state bits.
2093 	 */
2094 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2095 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2096 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2097 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2098 		/*
2099 		 * Setting all of a previous oldext extent to newext.
2100 		 * The left and right neighbors are both contiguous with new.
2101 		 */
2102 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2103 
2104 		xfs_iext_remove(ip, icur, state);
2105 		xfs_iext_remove(ip, icur, state);
2106 		xfs_iext_prev(ifp, icur);
2107 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2108 		ifp->if_nextents -= 2;
2109 		if (cur == NULL)
2110 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2111 		else {
2112 			rval = XFS_ILOG_CORE;
2113 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2114 			if (error)
2115 				goto done;
2116 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2117 				xfs_btree_mark_sick(cur);
2118 				error = -EFSCORRUPTED;
2119 				goto done;
2120 			}
2121 			if ((error = xfs_btree_delete(cur, &i)))
2122 				goto done;
2123 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2124 				xfs_btree_mark_sick(cur);
2125 				error = -EFSCORRUPTED;
2126 				goto done;
2127 			}
2128 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2129 				goto done;
2130 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2131 				xfs_btree_mark_sick(cur);
2132 				error = -EFSCORRUPTED;
2133 				goto done;
2134 			}
2135 			if ((error = xfs_btree_delete(cur, &i)))
2136 				goto done;
2137 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2138 				xfs_btree_mark_sick(cur);
2139 				error = -EFSCORRUPTED;
2140 				goto done;
2141 			}
2142 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2143 				goto done;
2144 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2145 				xfs_btree_mark_sick(cur);
2146 				error = -EFSCORRUPTED;
2147 				goto done;
2148 			}
2149 			error = xfs_bmbt_update(cur, &LEFT);
2150 			if (error)
2151 				goto done;
2152 		}
2153 		break;
2154 
2155 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2156 		/*
2157 		 * Setting all of a previous oldext extent to newext.
2158 		 * The left neighbor is contiguous, the right is not.
2159 		 */
2160 		LEFT.br_blockcount += PREV.br_blockcount;
2161 
2162 		xfs_iext_remove(ip, icur, state);
2163 		xfs_iext_prev(ifp, icur);
2164 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2165 		ifp->if_nextents--;
2166 		if (cur == NULL)
2167 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2168 		else {
2169 			rval = XFS_ILOG_CORE;
2170 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2171 			if (error)
2172 				goto done;
2173 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2174 				xfs_btree_mark_sick(cur);
2175 				error = -EFSCORRUPTED;
2176 				goto done;
2177 			}
2178 			if ((error = xfs_btree_delete(cur, &i)))
2179 				goto done;
2180 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2181 				xfs_btree_mark_sick(cur);
2182 				error = -EFSCORRUPTED;
2183 				goto done;
2184 			}
2185 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2186 				goto done;
2187 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2188 				xfs_btree_mark_sick(cur);
2189 				error = -EFSCORRUPTED;
2190 				goto done;
2191 			}
2192 			error = xfs_bmbt_update(cur, &LEFT);
2193 			if (error)
2194 				goto done;
2195 		}
2196 		break;
2197 
2198 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2199 		/*
2200 		 * Setting all of a previous oldext extent to newext.
2201 		 * The right neighbor is contiguous, the left is not.
2202 		 */
2203 		PREV.br_blockcount += RIGHT.br_blockcount;
2204 		PREV.br_state = new->br_state;
2205 
2206 		xfs_iext_next(ifp, icur);
2207 		xfs_iext_remove(ip, icur, state);
2208 		xfs_iext_prev(ifp, icur);
2209 		xfs_iext_update_extent(ip, state, icur, &PREV);
2210 		ifp->if_nextents--;
2211 
2212 		if (cur == NULL)
2213 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2214 		else {
2215 			rval = XFS_ILOG_CORE;
2216 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2217 			if (error)
2218 				goto done;
2219 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2220 				xfs_btree_mark_sick(cur);
2221 				error = -EFSCORRUPTED;
2222 				goto done;
2223 			}
2224 			if ((error = xfs_btree_delete(cur, &i)))
2225 				goto done;
2226 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2227 				xfs_btree_mark_sick(cur);
2228 				error = -EFSCORRUPTED;
2229 				goto done;
2230 			}
2231 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2232 				goto done;
2233 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2234 				xfs_btree_mark_sick(cur);
2235 				error = -EFSCORRUPTED;
2236 				goto done;
2237 			}
2238 			error = xfs_bmbt_update(cur, &PREV);
2239 			if (error)
2240 				goto done;
2241 		}
2242 		break;
2243 
2244 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2245 		/*
2246 		 * Setting all of a previous oldext extent to newext.
2247 		 * Neither the left nor right neighbors are contiguous with
2248 		 * the new one.
2249 		 */
2250 		PREV.br_state = new->br_state;
2251 		xfs_iext_update_extent(ip, state, icur, &PREV);
2252 
2253 		if (cur == NULL)
2254 			rval = XFS_ILOG_DEXT;
2255 		else {
2256 			rval = 0;
2257 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2258 			if (error)
2259 				goto done;
2260 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2261 				xfs_btree_mark_sick(cur);
2262 				error = -EFSCORRUPTED;
2263 				goto done;
2264 			}
2265 			error = xfs_bmbt_update(cur, &PREV);
2266 			if (error)
2267 				goto done;
2268 		}
2269 		break;
2270 
2271 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2272 		/*
2273 		 * Setting the first part of a previous oldext extent to newext.
2274 		 * The left neighbor is contiguous.
2275 		 */
2276 		LEFT.br_blockcount += new->br_blockcount;
2277 
2278 		old = PREV;
2279 		PREV.br_startoff += new->br_blockcount;
2280 		PREV.br_startblock += new->br_blockcount;
2281 		PREV.br_blockcount -= new->br_blockcount;
2282 
2283 		xfs_iext_update_extent(ip, state, icur, &PREV);
2284 		xfs_iext_prev(ifp, icur);
2285 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2286 
2287 		if (cur == NULL)
2288 			rval = XFS_ILOG_DEXT;
2289 		else {
2290 			rval = 0;
2291 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2292 			if (error)
2293 				goto done;
2294 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2295 				xfs_btree_mark_sick(cur);
2296 				error = -EFSCORRUPTED;
2297 				goto done;
2298 			}
2299 			error = xfs_bmbt_update(cur, &PREV);
2300 			if (error)
2301 				goto done;
2302 			error = xfs_btree_decrement(cur, 0, &i);
2303 			if (error)
2304 				goto done;
2305 			error = xfs_bmbt_update(cur, &LEFT);
2306 			if (error)
2307 				goto done;
2308 		}
2309 		break;
2310 
2311 	case BMAP_LEFT_FILLING:
2312 		/*
2313 		 * Setting the first part of a previous oldext extent to newext.
2314 		 * The left neighbor is not contiguous.
2315 		 */
2316 		old = PREV;
2317 		PREV.br_startoff += new->br_blockcount;
2318 		PREV.br_startblock += new->br_blockcount;
2319 		PREV.br_blockcount -= new->br_blockcount;
2320 
2321 		xfs_iext_update_extent(ip, state, icur, &PREV);
2322 		xfs_iext_insert(ip, icur, new, state);
2323 		ifp->if_nextents++;
2324 
2325 		if (cur == NULL)
2326 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2327 		else {
2328 			rval = XFS_ILOG_CORE;
2329 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2330 			if (error)
2331 				goto done;
2332 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2333 				xfs_btree_mark_sick(cur);
2334 				error = -EFSCORRUPTED;
2335 				goto done;
2336 			}
2337 			error = xfs_bmbt_update(cur, &PREV);
2338 			if (error)
2339 				goto done;
2340 			cur->bc_rec.b = *new;
2341 			if ((error = xfs_btree_insert(cur, &i)))
2342 				goto done;
2343 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2344 				xfs_btree_mark_sick(cur);
2345 				error = -EFSCORRUPTED;
2346 				goto done;
2347 			}
2348 		}
2349 		break;
2350 
2351 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2352 		/*
2353 		 * Setting the last part of a previous oldext extent to newext.
2354 		 * The right neighbor is contiguous with the new allocation.
2355 		 */
2356 		old = PREV;
2357 		PREV.br_blockcount -= new->br_blockcount;
2358 
2359 		RIGHT.br_startoff = new->br_startoff;
2360 		RIGHT.br_startblock = new->br_startblock;
2361 		RIGHT.br_blockcount += new->br_blockcount;
2362 
2363 		xfs_iext_update_extent(ip, state, icur, &PREV);
2364 		xfs_iext_next(ifp, icur);
2365 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
2366 
2367 		if (cur == NULL)
2368 			rval = XFS_ILOG_DEXT;
2369 		else {
2370 			rval = 0;
2371 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2372 			if (error)
2373 				goto done;
2374 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2375 				xfs_btree_mark_sick(cur);
2376 				error = -EFSCORRUPTED;
2377 				goto done;
2378 			}
2379 			error = xfs_bmbt_update(cur, &PREV);
2380 			if (error)
2381 				goto done;
2382 			error = xfs_btree_increment(cur, 0, &i);
2383 			if (error)
2384 				goto done;
2385 			error = xfs_bmbt_update(cur, &RIGHT);
2386 			if (error)
2387 				goto done;
2388 		}
2389 		break;
2390 
2391 	case BMAP_RIGHT_FILLING:
2392 		/*
2393 		 * Setting the last part of a previous oldext extent to newext.
2394 		 * The right neighbor is not contiguous.
2395 		 */
2396 		old = PREV;
2397 		PREV.br_blockcount -= new->br_blockcount;
2398 
2399 		xfs_iext_update_extent(ip, state, icur, &PREV);
2400 		xfs_iext_next(ifp, icur);
2401 		xfs_iext_insert(ip, icur, new, state);
2402 		ifp->if_nextents++;
2403 
2404 		if (cur == NULL)
2405 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2406 		else {
2407 			rval = XFS_ILOG_CORE;
2408 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2409 			if (error)
2410 				goto done;
2411 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2412 				xfs_btree_mark_sick(cur);
2413 				error = -EFSCORRUPTED;
2414 				goto done;
2415 			}
2416 			error = xfs_bmbt_update(cur, &PREV);
2417 			if (error)
2418 				goto done;
2419 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2420 			if (error)
2421 				goto done;
2422 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2423 				xfs_btree_mark_sick(cur);
2424 				error = -EFSCORRUPTED;
2425 				goto done;
2426 			}
2427 			if ((error = xfs_btree_insert(cur, &i)))
2428 				goto done;
2429 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2430 				xfs_btree_mark_sick(cur);
2431 				error = -EFSCORRUPTED;
2432 				goto done;
2433 			}
2434 		}
2435 		break;
2436 
2437 	case 0:
2438 		/*
2439 		 * Setting the middle part of a previous oldext extent to
2440 		 * newext.  Contiguity is impossible here.
2441 		 * One extent becomes three extents.
2442 		 */
2443 		old = PREV;
2444 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2445 
2446 		r[0] = *new;
2447 		r[1].br_startoff = new_endoff;
2448 		r[1].br_blockcount =
2449 			old.br_startoff + old.br_blockcount - new_endoff;
2450 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
2451 		r[1].br_state = PREV.br_state;
2452 
2453 		xfs_iext_update_extent(ip, state, icur, &PREV);
2454 		xfs_iext_next(ifp, icur);
2455 		xfs_iext_insert(ip, icur, &r[1], state);
2456 		xfs_iext_insert(ip, icur, &r[0], state);
2457 		ifp->if_nextents += 2;
2458 
2459 		if (cur == NULL)
2460 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2461 		else {
2462 			rval = XFS_ILOG_CORE;
2463 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2464 			if (error)
2465 				goto done;
2466 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2467 				xfs_btree_mark_sick(cur);
2468 				error = -EFSCORRUPTED;
2469 				goto done;
2470 			}
2471 			/* new right extent - oldext */
2472 			error = xfs_bmbt_update(cur, &r[1]);
2473 			if (error)
2474 				goto done;
2475 			/* new left extent - oldext */
2476 			cur->bc_rec.b = PREV;
2477 			if ((error = xfs_btree_insert(cur, &i)))
2478 				goto done;
2479 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2480 				xfs_btree_mark_sick(cur);
2481 				error = -EFSCORRUPTED;
2482 				goto done;
2483 			}
2484 			/*
2485 			 * Reset the cursor to the position of the new extent
2486 			 * we are about to insert as we can't trust it after
2487 			 * the previous insert.
2488 			 */
2489 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2490 			if (error)
2491 				goto done;
2492 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2493 				xfs_btree_mark_sick(cur);
2494 				error = -EFSCORRUPTED;
2495 				goto done;
2496 			}
2497 			/* new middle extent - newext */
2498 			if ((error = xfs_btree_insert(cur, &i)))
2499 				goto done;
2500 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2501 				xfs_btree_mark_sick(cur);
2502 				error = -EFSCORRUPTED;
2503 				goto done;
2504 			}
2505 		}
2506 		break;
2507 
2508 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2509 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2510 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2511 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2512 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2513 	case BMAP_LEFT_CONTIG:
2514 	case BMAP_RIGHT_CONTIG:
2515 		/*
2516 		 * These cases are all impossible.
2517 		 */
2518 		ASSERT(0);
2519 	}
2520 
2521 	/* update reverse mappings */
2522 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2523 
2524 	/* convert to a btree if necessary */
2525 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2526 		int	tmp_logflags;	/* partial log flag return val */
2527 
2528 		ASSERT(cur == NULL);
2529 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2530 				&tmp_logflags, whichfork);
2531 		*logflagsp |= tmp_logflags;
2532 		if (error)
2533 			goto done;
2534 	}
2535 
2536 	/* clear out the allocated field, done with it now in any case. */
2537 	if (cur) {
2538 		cur->bc_bmap.allocated = 0;
2539 		*curp = cur;
2540 	}
2541 
2542 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2543 done:
2544 	*logflagsp |= rval;
2545 	return error;
2546 #undef	LEFT
2547 #undef	RIGHT
2548 #undef	PREV
2549 }
2550 
2551 /*
2552  * Convert a hole to a delayed allocation.
2553  */
2554 STATIC void
xfs_bmap_add_extent_hole_delay(xfs_inode_t * ip,int whichfork,struct xfs_iext_cursor * icur,xfs_bmbt_irec_t * new)2555 xfs_bmap_add_extent_hole_delay(
2556 	xfs_inode_t		*ip,	/* incore inode pointer */
2557 	int			whichfork,
2558 	struct xfs_iext_cursor	*icur,
2559 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
2560 {
2561 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2562 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2563 	xfs_filblks_t		newlen=0;	/* new indirect size */
2564 	xfs_filblks_t		oldlen=0;	/* old indirect size */
2565 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2566 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2567 	xfs_filblks_t		temp;	 /* temp for indirect calculations */
2568 
2569 	ifp = xfs_ifork_ptr(ip, whichfork);
2570 	ASSERT(isnullstartblock(new->br_startblock));
2571 
2572 	/*
2573 	 * Check and set flags if this segment has a left neighbor
2574 	 */
2575 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2576 		state |= BMAP_LEFT_VALID;
2577 		if (isnullstartblock(left.br_startblock))
2578 			state |= BMAP_LEFT_DELAY;
2579 	}
2580 
2581 	/*
2582 	 * Check and set flags if the current (right) segment exists.
2583 	 * If it doesn't exist, we're converting the hole at end-of-file.
2584 	 */
2585 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2586 		state |= BMAP_RIGHT_VALID;
2587 		if (isnullstartblock(right.br_startblock))
2588 			state |= BMAP_RIGHT_DELAY;
2589 	}
2590 
2591 	/*
2592 	 * Set contiguity flags on the left and right neighbors.
2593 	 * Don't let extents get too large, even if the pieces are contiguous.
2594 	 */
2595 	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2596 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2597 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2598 		state |= BMAP_LEFT_CONTIG;
2599 
2600 	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2601 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2602 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2603 	    (!(state & BMAP_LEFT_CONTIG) ||
2604 	     (left.br_blockcount + new->br_blockcount +
2605 	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
2606 		state |= BMAP_RIGHT_CONTIG;
2607 
2608 	/*
2609 	 * Switch out based on the contiguity flags.
2610 	 */
2611 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2612 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2613 		/*
2614 		 * New allocation is contiguous with delayed allocations
2615 		 * on the left and on the right.
2616 		 * Merge all three into a single extent record.
2617 		 */
2618 		temp = left.br_blockcount + new->br_blockcount +
2619 			right.br_blockcount;
2620 
2621 		oldlen = startblockval(left.br_startblock) +
2622 			startblockval(new->br_startblock) +
2623 			startblockval(right.br_startblock);
2624 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2625 					 oldlen);
2626 		left.br_startblock = nullstartblock(newlen);
2627 		left.br_blockcount = temp;
2628 
2629 		xfs_iext_remove(ip, icur, state);
2630 		xfs_iext_prev(ifp, icur);
2631 		xfs_iext_update_extent(ip, state, icur, &left);
2632 		break;
2633 
2634 	case BMAP_LEFT_CONTIG:
2635 		/*
2636 		 * New allocation is contiguous with a delayed allocation
2637 		 * on the left.
2638 		 * Merge the new allocation with the left neighbor.
2639 		 */
2640 		temp = left.br_blockcount + new->br_blockcount;
2641 
2642 		oldlen = startblockval(left.br_startblock) +
2643 			startblockval(new->br_startblock);
2644 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2645 					 oldlen);
2646 		left.br_blockcount = temp;
2647 		left.br_startblock = nullstartblock(newlen);
2648 
2649 		xfs_iext_prev(ifp, icur);
2650 		xfs_iext_update_extent(ip, state, icur, &left);
2651 		break;
2652 
2653 	case BMAP_RIGHT_CONTIG:
2654 		/*
2655 		 * New allocation is contiguous with a delayed allocation
2656 		 * on the right.
2657 		 * Merge the new allocation with the right neighbor.
2658 		 */
2659 		temp = new->br_blockcount + right.br_blockcount;
2660 		oldlen = startblockval(new->br_startblock) +
2661 			startblockval(right.br_startblock);
2662 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2663 					 oldlen);
2664 		right.br_startoff = new->br_startoff;
2665 		right.br_startblock = nullstartblock(newlen);
2666 		right.br_blockcount = temp;
2667 		xfs_iext_update_extent(ip, state, icur, &right);
2668 		break;
2669 
2670 	case 0:
2671 		/*
2672 		 * New allocation is not contiguous with another
2673 		 * delayed allocation.
2674 		 * Insert a new entry.
2675 		 */
2676 		oldlen = newlen = 0;
2677 		xfs_iext_insert(ip, icur, new, state);
2678 		break;
2679 	}
2680 	if (oldlen != newlen) {
2681 		ASSERT(oldlen > newlen);
2682 		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
2683 
2684 		/*
2685 		 * Nothing to do for disk quota accounting here.
2686 		 */
2687 		xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
2688 	}
2689 }
2690 
2691 /*
2692  * Convert a hole to a real allocation.
2693  */
2694 STATIC int				/* error */
xfs_bmap_add_extent_hole_real(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,struct xfs_iext_cursor * icur,struct xfs_btree_cur ** curp,struct xfs_bmbt_irec * new,int * logflagsp,uint32_t flags)2695 xfs_bmap_add_extent_hole_real(
2696 	struct xfs_trans	*tp,
2697 	struct xfs_inode	*ip,
2698 	int			whichfork,
2699 	struct xfs_iext_cursor	*icur,
2700 	struct xfs_btree_cur	**curp,
2701 	struct xfs_bmbt_irec	*new,
2702 	int			*logflagsp,
2703 	uint32_t		flags)
2704 {
2705 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
2706 	struct xfs_mount	*mp = ip->i_mount;
2707 	struct xfs_btree_cur	*cur = *curp;
2708 	int			error;	/* error return value */
2709 	int			i;	/* temp state */
2710 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2711 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2712 	int			rval=0;	/* return value (logging flags) */
2713 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2714 	struct xfs_bmbt_irec	old;
2715 
2716 	ASSERT(!isnullstartblock(new->br_startblock));
2717 	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
2718 
2719 	XFS_STATS_INC(mp, xs_add_exlist);
2720 
2721 	/*
2722 	 * Check and set flags if this segment has a left neighbor.
2723 	 */
2724 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2725 		state |= BMAP_LEFT_VALID;
2726 		if (isnullstartblock(left.br_startblock))
2727 			state |= BMAP_LEFT_DELAY;
2728 	}
2729 
2730 	/*
2731 	 * Check and set flags if this segment has a current value.
2732 	 * Not true if we're inserting into the "hole" at eof.
2733 	 */
2734 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2735 		state |= BMAP_RIGHT_VALID;
2736 		if (isnullstartblock(right.br_startblock))
2737 			state |= BMAP_RIGHT_DELAY;
2738 	}
2739 
2740 	/*
2741 	 * We're inserting a real allocation between "left" and "right".
2742 	 * Set the contiguity flags.  Don't let extents get too large.
2743 	 */
2744 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2745 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2746 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
2747 	    left.br_state == new->br_state &&
2748 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2749 		state |= BMAP_LEFT_CONTIG;
2750 
2751 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2752 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2753 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
2754 	    new->br_state == right.br_state &&
2755 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2756 	    (!(state & BMAP_LEFT_CONTIG) ||
2757 	     left.br_blockcount + new->br_blockcount +
2758 	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
2759 		state |= BMAP_RIGHT_CONTIG;
2760 
2761 	error = 0;
2762 	/*
2763 	 * Select which case we're in here, and implement it.
2764 	 */
2765 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2766 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2767 		/*
2768 		 * New allocation is contiguous with real allocations on the
2769 		 * left and on the right.
2770 		 * Merge all three into a single extent record.
2771 		 */
2772 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
2773 
2774 		xfs_iext_remove(ip, icur, state);
2775 		xfs_iext_prev(ifp, icur);
2776 		xfs_iext_update_extent(ip, state, icur, &left);
2777 		ifp->if_nextents--;
2778 
2779 		if (cur == NULL) {
2780 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2781 		} else {
2782 			rval = XFS_ILOG_CORE;
2783 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
2784 			if (error)
2785 				goto done;
2786 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2787 				xfs_btree_mark_sick(cur);
2788 				error = -EFSCORRUPTED;
2789 				goto done;
2790 			}
2791 			error = xfs_btree_delete(cur, &i);
2792 			if (error)
2793 				goto done;
2794 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2795 				xfs_btree_mark_sick(cur);
2796 				error = -EFSCORRUPTED;
2797 				goto done;
2798 			}
2799 			error = xfs_btree_decrement(cur, 0, &i);
2800 			if (error)
2801 				goto done;
2802 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2803 				xfs_btree_mark_sick(cur);
2804 				error = -EFSCORRUPTED;
2805 				goto done;
2806 			}
2807 			error = xfs_bmbt_update(cur, &left);
2808 			if (error)
2809 				goto done;
2810 		}
2811 		break;
2812 
2813 	case BMAP_LEFT_CONTIG:
2814 		/*
2815 		 * New allocation is contiguous with a real allocation
2816 		 * on the left.
2817 		 * Merge the new allocation with the left neighbor.
2818 		 */
2819 		old = left;
2820 		left.br_blockcount += new->br_blockcount;
2821 
2822 		xfs_iext_prev(ifp, icur);
2823 		xfs_iext_update_extent(ip, state, icur, &left);
2824 
2825 		if (cur == NULL) {
2826 			rval = xfs_ilog_fext(whichfork);
2827 		} else {
2828 			rval = 0;
2829 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2830 			if (error)
2831 				goto done;
2832 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2833 				xfs_btree_mark_sick(cur);
2834 				error = -EFSCORRUPTED;
2835 				goto done;
2836 			}
2837 			error = xfs_bmbt_update(cur, &left);
2838 			if (error)
2839 				goto done;
2840 		}
2841 		break;
2842 
2843 	case BMAP_RIGHT_CONTIG:
2844 		/*
2845 		 * New allocation is contiguous with a real allocation
2846 		 * on the right.
2847 		 * Merge the new allocation with the right neighbor.
2848 		 */
2849 		old = right;
2850 
2851 		right.br_startoff = new->br_startoff;
2852 		right.br_startblock = new->br_startblock;
2853 		right.br_blockcount += new->br_blockcount;
2854 		xfs_iext_update_extent(ip, state, icur, &right);
2855 
2856 		if (cur == NULL) {
2857 			rval = xfs_ilog_fext(whichfork);
2858 		} else {
2859 			rval = 0;
2860 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2861 			if (error)
2862 				goto done;
2863 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2864 				xfs_btree_mark_sick(cur);
2865 				error = -EFSCORRUPTED;
2866 				goto done;
2867 			}
2868 			error = xfs_bmbt_update(cur, &right);
2869 			if (error)
2870 				goto done;
2871 		}
2872 		break;
2873 
2874 	case 0:
2875 		/*
2876 		 * New allocation is not contiguous with another
2877 		 * real allocation.
2878 		 * Insert a new entry.
2879 		 */
2880 		xfs_iext_insert(ip, icur, new, state);
2881 		ifp->if_nextents++;
2882 
2883 		if (cur == NULL) {
2884 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2885 		} else {
2886 			rval = XFS_ILOG_CORE;
2887 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2888 			if (error)
2889 				goto done;
2890 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2891 				xfs_btree_mark_sick(cur);
2892 				error = -EFSCORRUPTED;
2893 				goto done;
2894 			}
2895 			error = xfs_btree_insert(cur, &i);
2896 			if (error)
2897 				goto done;
2898 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2899 				xfs_btree_mark_sick(cur);
2900 				error = -EFSCORRUPTED;
2901 				goto done;
2902 			}
2903 		}
2904 		break;
2905 	}
2906 
2907 	/* add reverse mapping unless caller opted out */
2908 	if (!(flags & XFS_BMAPI_NORMAP))
2909 		xfs_rmap_map_extent(tp, ip, whichfork, new);
2910 
2911 	/* convert to a btree if necessary */
2912 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2913 		int	tmp_logflags;	/* partial log flag return val */
2914 
2915 		ASSERT(cur == NULL);
2916 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2917 				&tmp_logflags, whichfork);
2918 		*logflagsp |= tmp_logflags;
2919 		cur = *curp;
2920 		if (error)
2921 			goto done;
2922 	}
2923 
2924 	/* clear out the allocated field, done with it now in any case. */
2925 	if (cur)
2926 		cur->bc_bmap.allocated = 0;
2927 
2928 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2929 done:
2930 	*logflagsp |= rval;
2931 	return error;
2932 }
2933 
2934 /*
2935  * Functions used in the extent read, allocate and remove paths
2936  */
2937 
2938 /*
2939  * Adjust the size of the new extent based on i_extsize and rt extsize.
2940  */
2941 int
xfs_bmap_extsize_align(xfs_mount_t * mp,xfs_bmbt_irec_t * gotp,xfs_bmbt_irec_t * prevp,xfs_extlen_t extsz,int rt,int eof,int delay,int convert,xfs_fileoff_t * offp,xfs_extlen_t * lenp)2942 xfs_bmap_extsize_align(
2943 	xfs_mount_t	*mp,
2944 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
2945 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
2946 	xfs_extlen_t	extsz,		/* align to this extent size */
2947 	int		rt,		/* is this a realtime inode? */
2948 	int		eof,		/* is extent at end-of-file? */
2949 	int		delay,		/* creating delalloc extent? */
2950 	int		convert,	/* overwriting unwritten extent? */
2951 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
2952 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
2953 {
2954 	xfs_fileoff_t	orig_off;	/* original offset */
2955 	xfs_extlen_t	orig_alen;	/* original length */
2956 	xfs_fileoff_t	orig_end;	/* original off+len */
2957 	xfs_fileoff_t	nexto;		/* next file offset */
2958 	xfs_fileoff_t	prevo;		/* previous file offset */
2959 	xfs_fileoff_t	align_off;	/* temp for offset */
2960 	xfs_extlen_t	align_alen;	/* temp for length */
2961 	xfs_extlen_t	temp;		/* temp for calculations */
2962 
2963 	if (convert)
2964 		return 0;
2965 
2966 	orig_off = align_off = *offp;
2967 	orig_alen = align_alen = *lenp;
2968 	orig_end = orig_off + orig_alen;
2969 
2970 	/*
2971 	 * If this request overlaps an existing extent, then don't
2972 	 * attempt to perform any additional alignment.
2973 	 */
2974 	if (!delay && !eof &&
2975 	    (orig_off >= gotp->br_startoff) &&
2976 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2977 		return 0;
2978 	}
2979 
2980 	/*
2981 	 * If the file offset is unaligned vs. the extent size
2982 	 * we need to align it.  This will be possible unless
2983 	 * the file was previously written with a kernel that didn't
2984 	 * perform this alignment, or if a truncate shot us in the
2985 	 * foot.
2986 	 */
2987 	div_u64_rem(orig_off, extsz, &temp);
2988 	if (temp) {
2989 		align_alen += temp;
2990 		align_off -= temp;
2991 	}
2992 
2993 	/* Same adjustment for the end of the requested area. */
2994 	temp = (align_alen % extsz);
2995 	if (temp)
2996 		align_alen += extsz - temp;
2997 
2998 	/*
2999 	 * For large extent hint sizes, the aligned extent might be larger than
3000 	 * XFS_BMBT_MAX_EXTLEN. In that case, reduce the size by an extsz so
3001 	 * that it pulls the length back under XFS_BMBT_MAX_EXTLEN. The outer
3002 	 * allocation loops handle short allocation just fine, so it is safe to
3003 	 * do this. We only want to do it when we are forced to, though, because
3004 	 * it means more allocation operations are required.
3005 	 */
3006 	while (align_alen > XFS_MAX_BMBT_EXTLEN)
3007 		align_alen -= extsz;
3008 	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
3009 
3010 	/*
3011 	 * If the previous block overlaps with this proposed allocation
3012 	 * then move the start forward without adjusting the length.
3013 	 */
3014 	if (prevp->br_startoff != NULLFILEOFF) {
3015 		if (prevp->br_startblock == HOLESTARTBLOCK)
3016 			prevo = prevp->br_startoff;
3017 		else
3018 			prevo = prevp->br_startoff + prevp->br_blockcount;
3019 	} else
3020 		prevo = 0;
3021 	if (align_off != orig_off && align_off < prevo)
3022 		align_off = prevo;
3023 	/*
3024 	 * If the next block overlaps with this proposed allocation
3025 	 * then move the start back without adjusting the length,
3026 	 * but not before offset 0.
3027 	 * This may of course make the start overlap previous block,
3028 	 * and if we hit the offset 0 limit then the next block
3029 	 * can still overlap too.
3030 	 */
3031 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
3032 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3033 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3034 			nexto = gotp->br_startoff + gotp->br_blockcount;
3035 		else
3036 			nexto = gotp->br_startoff;
3037 	} else
3038 		nexto = NULLFILEOFF;
3039 	if (!eof &&
3040 	    align_off + align_alen != orig_end &&
3041 	    align_off + align_alen > nexto)
3042 		align_off = nexto > align_alen ? nexto - align_alen : 0;
3043 	/*
3044 	 * If we're now overlapping the next or previous extent that
3045 	 * means we can't fit an extsz piece in this hole.  Just move
3046 	 * the start forward to the first valid spot and set
3047 	 * the length so we hit the end.
3048 	 */
3049 	if (align_off != orig_off && align_off < prevo)
3050 		align_off = prevo;
3051 	if (align_off + align_alen != orig_end &&
3052 	    align_off + align_alen > nexto &&
3053 	    nexto != NULLFILEOFF) {
3054 		ASSERT(nexto > prevo);
3055 		align_alen = nexto - align_off;
3056 	}
3057 
3058 	/*
3059 	 * If realtime, and the result isn't a multiple of the realtime
3060 	 * extent size we need to remove blocks until it is.
3061 	 */
3062 	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
3063 		/*
3064 		 * We're not covering the original request, or
3065 		 * we won't be able to once we fix the length.
3066 		 */
3067 		if (orig_off < align_off ||
3068 		    orig_end > align_off + align_alen ||
3069 		    align_alen - temp < orig_alen)
3070 			return -EINVAL;
3071 		/*
3072 		 * Try to fix it by moving the start up.
3073 		 */
3074 		if (align_off + temp <= orig_off) {
3075 			align_alen -= temp;
3076 			align_off += temp;
3077 		}
3078 		/*
3079 		 * Try to fix it by moving the end in.
3080 		 */
3081 		else if (align_off + align_alen - temp >= orig_end)
3082 			align_alen -= temp;
3083 		/*
3084 		 * Set the start to the minimum then trim the length.
3085 		 */
3086 		else {
3087 			align_alen -= orig_off - align_off;
3088 			align_off = orig_off;
3089 			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
3090 		}
3091 		/*
3092 		 * Result doesn't cover the request, fail it.
3093 		 */
3094 		if (orig_off < align_off || orig_end > align_off + align_alen)
3095 			return -EINVAL;
3096 	} else {
3097 		ASSERT(orig_off >= align_off);
3098 		/* see XFS_BMBT_MAX_EXTLEN handling above */
3099 		ASSERT(orig_end <= align_off + align_alen ||
3100 		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
3101 	}
3102 
3103 #ifdef DEBUG
3104 	if (!eof && gotp->br_startoff != NULLFILEOFF)
3105 		ASSERT(align_off + align_alen <= gotp->br_startoff);
3106 	if (prevp->br_startoff != NULLFILEOFF)
3107 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3108 #endif
3109 
3110 	*lenp = align_alen;
3111 	*offp = align_off;
3112 	return 0;
3113 }
3114 
3115 static inline bool
xfs_bmap_adjacent_valid(struct xfs_bmalloca * ap,xfs_fsblock_t x,xfs_fsblock_t y)3116 xfs_bmap_adjacent_valid(
3117 	struct xfs_bmalloca	*ap,
3118 	xfs_fsblock_t		x,
3119 	xfs_fsblock_t		y)
3120 {
3121 	struct xfs_mount	*mp = ap->ip->i_mount;
3122 
3123 	if (XFS_IS_REALTIME_INODE(ap->ip) &&
3124 	    (ap->datatype & XFS_ALLOC_USERDATA))
3125 		return x < mp->m_sb.sb_rblocks;
3126 
3127 	return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
3128 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
3129 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
3130 }
3131 
3132 #define XFS_ALLOC_GAP_UNITS	4
3133 
3134 /* returns true if ap->blkno was modified */
3135 bool
xfs_bmap_adjacent(struct xfs_bmalloca * ap)3136 xfs_bmap_adjacent(
3137 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
3138 {
3139 	xfs_fsblock_t		adjust;		/* adjustment to block numbers */
3140 
3141 	/*
3142 	 * If allocating at eof, and there's a previous real block,
3143 	 * try to use its last block as our starting point.
3144 	 */
3145 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3146 	    !isnullstartblock(ap->prev.br_startblock) &&
3147 	    xfs_bmap_adjacent_valid(ap,
3148 			ap->prev.br_startblock + ap->prev.br_blockcount,
3149 			ap->prev.br_startblock)) {
3150 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3151 		/*
3152 		 * Adjust for the gap between prevp and us.
3153 		 */
3154 		adjust = ap->offset -
3155 			(ap->prev.br_startoff + ap->prev.br_blockcount);
3156 		if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust,
3157 				ap->prev.br_startblock))
3158 			ap->blkno += adjust;
3159 		return true;
3160 	}
3161 	/*
3162 	 * If not at eof, then compare the two neighbor blocks.
3163 	 * Figure out whether either one gives us a good starting point,
3164 	 * and pick the better one.
3165 	 */
3166 	if (!ap->eof) {
3167 		xfs_fsblock_t	gotbno;		/* right side block number */
3168 		xfs_fsblock_t	gotdiff=0;	/* right side difference */
3169 		xfs_fsblock_t	prevbno;	/* left side block number */
3170 		xfs_fsblock_t	prevdiff=0;	/* left side difference */
3171 
3172 		/*
3173 		 * If there's a previous (left) block, select a requested
3174 		 * start block based on it.
3175 		 */
3176 		if (ap->prev.br_startoff != NULLFILEOFF &&
3177 		    !isnullstartblock(ap->prev.br_startblock) &&
3178 		    (prevbno = ap->prev.br_startblock +
3179 			       ap->prev.br_blockcount) &&
3180 		    xfs_bmap_adjacent_valid(ap, prevbno,
3181 				ap->prev.br_startblock)) {
3182 			/*
3183 			 * Calculate gap to end of previous block.
3184 			 */
3185 			adjust = prevdiff = ap->offset -
3186 				(ap->prev.br_startoff +
3187 				 ap->prev.br_blockcount);
3188 			/*
3189 			 * Figure the startblock based on the previous block's
3190 			 * end and the gap size.
3191 			 * Heuristic!
3192 			 * If the gap is large relative to the piece we're
3193 			 * allocating, or using it gives us an invalid block
3194 			 * number, then just use the end of the previous block.
3195 			 */
3196 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3197 			    xfs_bmap_adjacent_valid(ap, prevbno + prevdiff,
3198 					ap->prev.br_startblock))
3199 				prevbno += adjust;
3200 			else
3201 				prevdiff += adjust;
3202 		}
3203 		/*
3204 		 * No previous block or can't follow it, just default.
3205 		 */
3206 		else
3207 			prevbno = NULLFSBLOCK;
3208 		/*
3209 		 * If there's a following (right) block, select a requested
3210 		 * start block based on it.
3211 		 */
3212 		if (!isnullstartblock(ap->got.br_startblock)) {
3213 			/*
3214 			 * Calculate gap to start of next block.
3215 			 */
3216 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
3217 			/*
3218 			 * Figure the startblock based on the next block's
3219 			 * start and the gap size.
3220 			 */
3221 			gotbno = ap->got.br_startblock;
3222 			/*
3223 			 * Heuristic!
3224 			 * If the gap is large relative to the piece we're
3225 			 * allocating, or using it gives us an invalid block
3226 			 * number, then just use the start of the next block
3227 			 * offset by our length.
3228 			 */
3229 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3230 			    xfs_bmap_adjacent_valid(ap, gotbno - gotdiff,
3231 					gotbno))
3232 				gotbno -= adjust;
3233 			else if (xfs_bmap_adjacent_valid(ap, gotbno - ap->length,
3234 					gotbno)) {
3235 				gotbno -= ap->length;
3236 				gotdiff += adjust - ap->length;
3237 			} else
3238 				gotdiff += adjust;
3239 		}
3240 		/*
3241 		 * No next block, just default.
3242 		 */
3243 		else
3244 			gotbno = NULLFSBLOCK;
3245 		/*
3246 		 * If both valid, pick the better one, else the only good
3247 		 * one, else ap->blkno is already set (to 0 or the inode block).
3248 		 */
3249 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3250 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3251 			return true;
3252 		}
3253 		if (prevbno != NULLFSBLOCK) {
3254 			ap->blkno = prevbno;
3255 			return true;
3256 		}
3257 		if (gotbno != NULLFSBLOCK) {
3258 			ap->blkno = gotbno;
3259 			return true;
3260 		}
3261 	}
3262 
3263 	return false;
3264 }
3265 
3266 int
xfs_bmap_longest_free_extent(struct xfs_perag * pag,struct xfs_trans * tp,xfs_extlen_t * blen)3267 xfs_bmap_longest_free_extent(
3268 	struct xfs_perag	*pag,
3269 	struct xfs_trans	*tp,
3270 	xfs_extlen_t		*blen)
3271 {
3272 	xfs_extlen_t		longest;
3273 	int			error = 0;
3274 
3275 	if (!xfs_perag_initialised_agf(pag)) {
3276 		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3277 				NULL);
3278 		if (error)
3279 			return error;
3280 	}
3281 
3282 	longest = xfs_alloc_longest_free_extent(pag,
3283 				xfs_alloc_min_freelist(pag->pag_mount, pag),
3284 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3285 	if (*blen < longest)
3286 		*blen = longest;
3287 
3288 	return 0;
3289 }
3290 
3291 static xfs_extlen_t
xfs_bmap_select_minlen(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args,xfs_extlen_t blen)3292 xfs_bmap_select_minlen(
3293 	struct xfs_bmalloca	*ap,
3294 	struct xfs_alloc_arg	*args,
3295 	xfs_extlen_t		blen)
3296 {
3297 
3298 	/*
3299 	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), it is
3300 	 * possible that there is enough contiguous free space for this request.
3301 	 */
3302 	if (blen < ap->minlen)
3303 		return ap->minlen;
3304 
3305 	/*
3306 	 * If the best seen length is less than the request length,
3307 	 * use the best as the minimum, otherwise we've got the maxlen we
3308 	 * were asked for.
3309 	 */
3310 	if (blen < args->maxlen)
3311 		return blen;
3312 	return args->maxlen;
3313 }
3314 
3315 static int
xfs_bmap_btalloc_select_lengths(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args,xfs_extlen_t * blen)3316 xfs_bmap_btalloc_select_lengths(
3317 	struct xfs_bmalloca	*ap,
3318 	struct xfs_alloc_arg	*args,
3319 	xfs_extlen_t		*blen)
3320 {
3321 	struct xfs_mount	*mp = args->mp;
3322 	struct xfs_perag	*pag;
3323 	xfs_agnumber_t		agno, startag;
3324 	int			error = 0;
3325 
3326 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3327 		args->total = ap->minlen;
3328 		args->minlen = ap->minlen;
3329 		return 0;
3330 	}
3331 
3332 	args->total = ap->total;
3333 	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3334 	if (startag == NULLAGNUMBER)
3335 		startag = 0;
3336 
3337 	*blen = 0;
3338 	for_each_perag_wrap(mp, startag, agno, pag) {
3339 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3340 		if (error && error != -EAGAIN)
3341 			break;
3342 		error = 0;
3343 		if (*blen >= args->maxlen)
3344 			break;
3345 	}
3346 	if (pag)
3347 		xfs_perag_rele(pag);
3348 
3349 	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3350 	return error;
3351 }
3352 
3353 /* Update all inode and quota accounting for the allocation we just did. */
3354 void
xfs_bmap_alloc_account(struct xfs_bmalloca * ap)3355 xfs_bmap_alloc_account(
3356 	struct xfs_bmalloca	*ap)
3357 {
3358 	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3359 					!(ap->flags & XFS_BMAPI_ATTRFORK);
3360 	uint			fld;
3361 
3362 	if (ap->flags & XFS_BMAPI_COWFORK) {
3363 		/*
3364 		 * COW fork blocks are in-core only and thus are treated as
3365 		 * in-core quota reservation (like delalloc blocks) even when
3366 		 * converted to real blocks. The quota reservation is not
3367 		 * accounted to disk until blocks are remapped to the data
3368 		 * fork. So if these blocks were previously delalloc, we
3369 		 * already have quota reservation and there's nothing to do
3370 		 * yet.
3371 		 */
3372 		if (ap->wasdel) {
3373 			xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3374 			return;
3375 		}
3376 
3377 		/*
3378 		 * Otherwise, we've allocated blocks in a hole. The transaction
3379 		 * has acquired in-core quota reservation for this extent.
3380 		 * Rather than account these as real blocks, however, we reduce
3381 		 * the transaction quota reservation based on the allocation.
3382 		 * This essentially transfers the transaction quota reservation
3383 		 * to that of a delalloc extent.
3384 		 */
3385 		ap->ip->i_delayed_blks += ap->length;
3386 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3387 				XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3388 				-(long)ap->length);
3389 		return;
3390 	}
3391 
3392 	/* data/attr fork only */
3393 	ap->ip->i_nblocks += ap->length;
3394 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3395 	if (ap->wasdel) {
3396 		ap->ip->i_delayed_blks -= ap->length;
3397 		xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3398 		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3399 	} else {
3400 		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3401 	}
3402 
3403 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3404 }
3405 
3406 static int
xfs_bmap_compute_alignments(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args)3407 xfs_bmap_compute_alignments(
3408 	struct xfs_bmalloca	*ap,
3409 	struct xfs_alloc_arg	*args)
3410 {
3411 	struct xfs_mount	*mp = args->mp;
3412 	xfs_extlen_t		align = 0; /* minimum allocation alignment */
3413 	int			stripe_align = 0;
3414 
3415 	/* stripe alignment for allocation is determined by mount parameters */
3416 	if (mp->m_swidth && xfs_has_swalloc(mp))
3417 		stripe_align = mp->m_swidth;
3418 	else if (mp->m_dalign)
3419 		stripe_align = mp->m_dalign;
3420 
3421 	if (ap->flags & XFS_BMAPI_COWFORK)
3422 		align = xfs_get_cowextsz_hint(ap->ip);
3423 	else if (ap->datatype & XFS_ALLOC_USERDATA)
3424 		align = xfs_get_extsz_hint(ap->ip);
3425 	if (align) {
3426 		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3427 					ap->eof, 0, ap->conv, &ap->offset,
3428 					&ap->length))
3429 			ASSERT(0);
3430 		ASSERT(ap->length);
3431 	}
3432 
3433 	/* apply extent size hints if obtained earlier */
3434 	if (align) {
3435 		args->prod = align;
3436 		div_u64_rem(ap->offset, args->prod, &args->mod);
3437 		if (args->mod)
3438 			args->mod = args->prod - args->mod;
3439 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3440 		args->prod = 1;
3441 		args->mod = 0;
3442 	} else {
3443 		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3444 		div_u64_rem(ap->offset, args->prod, &args->mod);
3445 		if (args->mod)
3446 			args->mod = args->prod - args->mod;
3447 	}
3448 
3449 	return stripe_align;
3450 }
3451 
3452 static void
xfs_bmap_process_allocated_extent(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args,xfs_fileoff_t orig_offset,xfs_extlen_t orig_length)3453 xfs_bmap_process_allocated_extent(
3454 	struct xfs_bmalloca	*ap,
3455 	struct xfs_alloc_arg	*args,
3456 	xfs_fileoff_t		orig_offset,
3457 	xfs_extlen_t		orig_length)
3458 {
3459 	ap->blkno = args->fsbno;
3460 	ap->length = args->len;
3461 	/*
3462 	 * If the extent size hint is active, we tried to round the
3463 	 * caller's allocation request offset down to extsz and the
3464 	 * length up to another extsz boundary.  If we found a free
3465 	 * extent we mapped it in starting at this new offset.  If the
3466 	 * newly mapped space isn't long enough to cover any of the
3467 	 * range of offsets that was originally requested, move the
3468 	 * mapping up so that we can fill as much of the caller's
3469 	 * original request as possible.  Free space is apparently
3470 	 * very fragmented so we're unlikely to be able to satisfy the
3471 	 * hints anyway.
3472 	 */
3473 	if (ap->length <= orig_length)
3474 		ap->offset = orig_offset;
3475 	else if (ap->offset + ap->length < orig_offset + orig_length)
3476 		ap->offset = orig_offset + orig_length - ap->length;
3477 	xfs_bmap_alloc_account(ap);
3478 }
3479 
3480 static int
xfs_bmap_exact_minlen_extent_alloc(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args)3481 xfs_bmap_exact_minlen_extent_alloc(
3482 	struct xfs_bmalloca	*ap,
3483 	struct xfs_alloc_arg	*args)
3484 {
3485 	if (ap->minlen != 1) {
3486 		args->fsbno = NULLFSBLOCK;
3487 		return 0;
3488 	}
3489 
3490 	args->alloc_minlen_only = 1;
3491 	args->minlen = args->maxlen = ap->minlen;
3492 	args->total = ap->total;
3493 
3494 	/*
3495 	 * Unlike the longest extent available in an AG, we don't track
3496 	 * the length of an AG's shortest extent.
3497 	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3498 	 * hence we can afford to start traversing from the 0th AG since
3499 	 * we need not be concerned about a drop in performance in
3500 	 * "debug only" code paths.
3501 	 */
3502 	ap->blkno = XFS_AGB_TO_FSB(ap->ip->i_mount, 0, 0);
3503 
3504 	/*
3505 	 * Call xfs_bmap_btalloc_low_space here as it first does a "normal" AG
3506 	 * iteration and then drops args->total to args->minlen, which might be
3507 	 * required to find an allocation for the transaction reservation when
3508 	 * the file system is very full.
3509 	 */
3510 	return xfs_bmap_btalloc_low_space(ap, args);
3511 }
3512 
3513 /*
3514  * If we are not low on available data blocks and we are allocating at
3515  * EOF, optimise allocation for contiguous file extension and/or stripe
3516  * alignment of the new extent.
3517  *
3518  * NOTE: ap->aeof is only set if the allocation length is >= the
3519  * stripe unit and the allocation offset is at the end of file.
3520  */
3521 static int
xfs_bmap_btalloc_at_eof(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args,xfs_extlen_t blen,int stripe_align,bool ag_only)3522 xfs_bmap_btalloc_at_eof(
3523 	struct xfs_bmalloca	*ap,
3524 	struct xfs_alloc_arg	*args,
3525 	xfs_extlen_t		blen,
3526 	int			stripe_align,
3527 	bool			ag_only)
3528 {
3529 	struct xfs_mount	*mp = args->mp;
3530 	struct xfs_perag	*caller_pag = args->pag;
3531 	int			error;
3532 
3533 	/*
3534 	 * If there are already extents in the file, try an exact EOF block
3535 	 * allocation to extend the file as a contiguous extent. If that fails,
3536 	 * or it's the first allocation in a file, just try for a stripe aligned
3537 	 * allocation.
3538 	 */
3539 	if (ap->offset) {
3540 		xfs_extlen_t	nextminlen = 0;
3541 
3542 		/*
3543 		 * Compute the minlen+alignment for the next case.  Set slop so
3544 		 * that the value of minlen+alignment+slop doesn't go up between
3545 		 * the calls.
3546 		 */
3547 		args->alignment = 1;
3548 		if (blen > stripe_align && blen <= args->maxlen)
3549 			nextminlen = blen - stripe_align;
3550 		else
3551 			nextminlen = args->minlen;
3552 		if (nextminlen + stripe_align > args->minlen + 1)
3553 			args->minalignslop = nextminlen + stripe_align -
3554 					args->minlen - 1;
3555 		else
3556 			args->minalignslop = 0;
3557 
3558 		if (!caller_pag)
3559 			args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3560 		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3561 		if (!caller_pag) {
3562 			xfs_perag_put(args->pag);
3563 			args->pag = NULL;
3564 		}
3565 		if (error)
3566 			return error;
3567 
3568 		if (args->fsbno != NULLFSBLOCK)
3569 			return 0;
3570 		/*
3571 		 * Exact allocation failed. Reset to try an aligned allocation
3572 		 * according to the original allocation specification.
3573 		 */
3574 		args->alignment = stripe_align;
3575 		args->minlen = nextminlen;
3576 		args->minalignslop = 0;
3577 	} else {
3578 		/*
3579 		 * Adjust minlen to try and preserve alignment if we
3580 		 * can't guarantee an aligned maxlen extent.
3581 		 */
3582 		args->alignment = stripe_align;
3583 		if (blen > args->alignment &&
3584 		    blen <= args->maxlen + args->alignment)
3585 			args->minlen = blen - args->alignment;
3586 		args->minalignslop = 0;
3587 	}
3588 
3589 	if (ag_only) {
3590 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3591 	} else {
3592 		args->pag = NULL;
3593 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3594 		ASSERT(args->pag == NULL);
3595 		args->pag = caller_pag;
3596 	}
3597 	if (error)
3598 		return error;
3599 
3600 	if (args->fsbno != NULLFSBLOCK)
3601 		return 0;
3602 
3603 	/*
3604 	 * Allocation failed, so turn return the allocation args to their
3605 	 * original non-aligned state so the caller can proceed on allocation
3606 	 * failure as if this function was never called.
3607 	 */
3608 	args->alignment = 1;
3609 	return 0;
3610 }
3611 
3612 /*
3613  * We have failed multiple allocation attempts so now are in a low space
3614  * allocation situation. Try a locality first full filesystem minimum length
3615  * allocation whilst still maintaining necessary total block reservation
3616  * requirements.
3617  *
3618  * If that fails, we are now critically low on space, so perform a last resort
3619  * allocation attempt: no reserve, no locality, blocking, minimum length, full
3620  * filesystem free space scan. We also indicate to future allocations in this
3621  * transaction that we are critically low on space so they don't waste time on
3622  * allocation modes that are unlikely to succeed.
3623  */
3624 int
xfs_bmap_btalloc_low_space(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args)3625 xfs_bmap_btalloc_low_space(
3626 	struct xfs_bmalloca	*ap,
3627 	struct xfs_alloc_arg	*args)
3628 {
3629 	int			error;
3630 
3631 	if (args->minlen > ap->minlen) {
3632 		args->minlen = ap->minlen;
3633 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3634 		if (error || args->fsbno != NULLFSBLOCK)
3635 			return error;
3636 	}
3637 
3638 	/* Last ditch attempt before failure is declared. */
3639 	args->total = ap->minlen;
3640 	error = xfs_alloc_vextent_first_ag(args, 0);
3641 	if (error)
3642 		return error;
3643 	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3644 	return 0;
3645 }
3646 
3647 static int
xfs_bmap_btalloc_filestreams(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args,int stripe_align)3648 xfs_bmap_btalloc_filestreams(
3649 	struct xfs_bmalloca	*ap,
3650 	struct xfs_alloc_arg	*args,
3651 	int			stripe_align)
3652 {
3653 	xfs_extlen_t		blen = 0;
3654 	int			error = 0;
3655 
3656 
3657 	error = xfs_filestream_select_ag(ap, args, &blen);
3658 	if (error)
3659 		return error;
3660 	ASSERT(args->pag);
3661 
3662 	/*
3663 	 * If we are in low space mode, then optimal allocation will fail so
3664 	 * prepare for minimal allocation and jump to the low space algorithm
3665 	 * immediately.
3666 	 */
3667 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3668 		args->minlen = ap->minlen;
3669 		ASSERT(args->fsbno == NULLFSBLOCK);
3670 		goto out_low_space;
3671 	}
3672 
3673 	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3674 	if (ap->aeof)
3675 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3676 				true);
3677 
3678 	if (!error && args->fsbno == NULLFSBLOCK)
3679 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3680 
3681 out_low_space:
3682 	/*
3683 	 * We are now done with the perag reference for the filestreams
3684 	 * association provided by xfs_filestream_select_ag(). Release it now as
3685 	 * we've either succeeded, had a fatal error or we are out of space and
3686 	 * need to do a full filesystem scan for free space which will take it's
3687 	 * own references.
3688 	 */
3689 	xfs_perag_rele(args->pag);
3690 	args->pag = NULL;
3691 	if (error || args->fsbno != NULLFSBLOCK)
3692 		return error;
3693 
3694 	return xfs_bmap_btalloc_low_space(ap, args);
3695 }
3696 
3697 static int
xfs_bmap_btalloc_best_length(struct xfs_bmalloca * ap,struct xfs_alloc_arg * args,int stripe_align)3698 xfs_bmap_btalloc_best_length(
3699 	struct xfs_bmalloca	*ap,
3700 	struct xfs_alloc_arg	*args,
3701 	int			stripe_align)
3702 {
3703 	xfs_extlen_t		blen = 0;
3704 	int			error;
3705 
3706 	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3707 	xfs_bmap_adjacent(ap);
3708 
3709 	/*
3710 	 * Search for an allocation group with a single extent large enough for
3711 	 * the request.  If one isn't found, then adjust the minimum allocation
3712 	 * size to the largest space found.
3713 	 */
3714 	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3715 	if (error)
3716 		return error;
3717 
3718 	/*
3719 	 * Don't attempt optimal EOF allocation if previous allocations barely
3720 	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3721 	 * optimal or even aligned allocations in this case, so don't waste time
3722 	 * trying.
3723 	 */
3724 	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3725 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3726 				false);
3727 		if (error || args->fsbno != NULLFSBLOCK)
3728 			return error;
3729 	}
3730 
3731 	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3732 	if (error || args->fsbno != NULLFSBLOCK)
3733 		return error;
3734 
3735 	return xfs_bmap_btalloc_low_space(ap, args);
3736 }
3737 
3738 static int
xfs_bmap_btalloc(struct xfs_bmalloca * ap)3739 xfs_bmap_btalloc(
3740 	struct xfs_bmalloca	*ap)
3741 {
3742 	struct xfs_mount	*mp = ap->ip->i_mount;
3743 	struct xfs_alloc_arg	args = {
3744 		.tp		= ap->tp,
3745 		.mp		= mp,
3746 		.fsbno		= NULLFSBLOCK,
3747 		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
3748 		.minleft	= ap->minleft,
3749 		.wasdel		= ap->wasdel,
3750 		.resv		= XFS_AG_RESV_NONE,
3751 		.datatype	= ap->datatype,
3752 		.alignment	= 1,
3753 		.minalignslop	= 0,
3754 	};
3755 	xfs_fileoff_t		orig_offset;
3756 	xfs_extlen_t		orig_length;
3757 	int			error;
3758 	int			stripe_align;
3759 
3760 	ASSERT(ap->length);
3761 	orig_offset = ap->offset;
3762 	orig_length = ap->length;
3763 
3764 	stripe_align = xfs_bmap_compute_alignments(ap, &args);
3765 
3766 	/* Trim the allocation back to the maximum an AG can fit. */
3767 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
3768 
3769 	if (unlikely(XFS_TEST_ERROR(false, mp,
3770 			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
3771 		error = xfs_bmap_exact_minlen_extent_alloc(ap, &args);
3772 	else if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3773 			xfs_inode_is_filestream(ap->ip))
3774 		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3775 	else
3776 		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3777 	if (error)
3778 		return error;
3779 
3780 	if (args.fsbno != NULLFSBLOCK) {
3781 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3782 			orig_length);
3783 	} else {
3784 		ap->blkno = NULLFSBLOCK;
3785 		ap->length = 0;
3786 	}
3787 	return 0;
3788 }
3789 
3790 /* Trim extent to fit a logical block range. */
3791 void
xfs_trim_extent(struct xfs_bmbt_irec * irec,xfs_fileoff_t bno,xfs_filblks_t len)3792 xfs_trim_extent(
3793 	struct xfs_bmbt_irec	*irec,
3794 	xfs_fileoff_t		bno,
3795 	xfs_filblks_t		len)
3796 {
3797 	xfs_fileoff_t		distance;
3798 	xfs_fileoff_t		end = bno + len;
3799 
3800 	if (irec->br_startoff + irec->br_blockcount <= bno ||
3801 	    irec->br_startoff >= end) {
3802 		irec->br_blockcount = 0;
3803 		return;
3804 	}
3805 
3806 	if (irec->br_startoff < bno) {
3807 		distance = bno - irec->br_startoff;
3808 		if (isnullstartblock(irec->br_startblock))
3809 			irec->br_startblock = DELAYSTARTBLOCK;
3810 		if (irec->br_startblock != DELAYSTARTBLOCK &&
3811 		    irec->br_startblock != HOLESTARTBLOCK)
3812 			irec->br_startblock += distance;
3813 		irec->br_startoff += distance;
3814 		irec->br_blockcount -= distance;
3815 	}
3816 
3817 	if (end < irec->br_startoff + irec->br_blockcount) {
3818 		distance = irec->br_startoff + irec->br_blockcount - end;
3819 		irec->br_blockcount -= distance;
3820 	}
3821 }
3822 
3823 /*
3824  * Trim the returned map to the required bounds
3825  */
3826 STATIC void
xfs_bmapi_trim_map(struct xfs_bmbt_irec * mval,struct xfs_bmbt_irec * got,xfs_fileoff_t * bno,xfs_filblks_t len,xfs_fileoff_t obno,xfs_fileoff_t end,int n,uint32_t flags)3827 xfs_bmapi_trim_map(
3828 	struct xfs_bmbt_irec	*mval,
3829 	struct xfs_bmbt_irec	*got,
3830 	xfs_fileoff_t		*bno,
3831 	xfs_filblks_t		len,
3832 	xfs_fileoff_t		obno,
3833 	xfs_fileoff_t		end,
3834 	int			n,
3835 	uint32_t		flags)
3836 {
3837 	if ((flags & XFS_BMAPI_ENTIRE) ||
3838 	    got->br_startoff + got->br_blockcount <= obno) {
3839 		*mval = *got;
3840 		if (isnullstartblock(got->br_startblock))
3841 			mval->br_startblock = DELAYSTARTBLOCK;
3842 		return;
3843 	}
3844 
3845 	if (obno > *bno)
3846 		*bno = obno;
3847 	ASSERT((*bno >= obno) || (n == 0));
3848 	ASSERT(*bno < end);
3849 	mval->br_startoff = *bno;
3850 	if (isnullstartblock(got->br_startblock))
3851 		mval->br_startblock = DELAYSTARTBLOCK;
3852 	else
3853 		mval->br_startblock = got->br_startblock +
3854 					(*bno - got->br_startoff);
3855 	/*
3856 	 * Return the minimum of what we got and what we asked for for
3857 	 * the length.  We can use the len variable here because it is
3858 	 * modified below and we could have been there before coming
3859 	 * here if the first part of the allocation didn't overlap what
3860 	 * was asked for.
3861 	 */
3862 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3863 			got->br_blockcount - (*bno - got->br_startoff));
3864 	mval->br_state = got->br_state;
3865 	ASSERT(mval->br_blockcount <= len);
3866 	return;
3867 }
3868 
3869 /*
3870  * Update and validate the extent map to return
3871  */
3872 STATIC void
xfs_bmapi_update_map(struct xfs_bmbt_irec ** map,xfs_fileoff_t * bno,xfs_filblks_t * len,xfs_fileoff_t obno,xfs_fileoff_t end,int * n,uint32_t flags)3873 xfs_bmapi_update_map(
3874 	struct xfs_bmbt_irec	**map,
3875 	xfs_fileoff_t		*bno,
3876 	xfs_filblks_t		*len,
3877 	xfs_fileoff_t		obno,
3878 	xfs_fileoff_t		end,
3879 	int			*n,
3880 	uint32_t		flags)
3881 {
3882 	xfs_bmbt_irec_t	*mval = *map;
3883 
3884 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3885 	       ((mval->br_startoff + mval->br_blockcount) <= end));
3886 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3887 	       (mval->br_startoff < obno));
3888 
3889 	*bno = mval->br_startoff + mval->br_blockcount;
3890 	*len = end - *bno;
3891 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3892 		/* update previous map with new information */
3893 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
3894 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3895 		ASSERT(mval->br_state == mval[-1].br_state);
3896 		mval[-1].br_blockcount = mval->br_blockcount;
3897 		mval[-1].br_state = mval->br_state;
3898 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3899 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
3900 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
3901 		   mval->br_startblock == mval[-1].br_startblock +
3902 					  mval[-1].br_blockcount &&
3903 		   mval[-1].br_state == mval->br_state) {
3904 		ASSERT(mval->br_startoff ==
3905 		       mval[-1].br_startoff + mval[-1].br_blockcount);
3906 		mval[-1].br_blockcount += mval->br_blockcount;
3907 	} else if (*n > 0 &&
3908 		   mval->br_startblock == DELAYSTARTBLOCK &&
3909 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
3910 		   mval->br_startoff ==
3911 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
3912 		mval[-1].br_blockcount += mval->br_blockcount;
3913 		mval[-1].br_state = mval->br_state;
3914 	} else if (!((*n == 0) &&
3915 		     ((mval->br_startoff + mval->br_blockcount) <=
3916 		      obno))) {
3917 		mval++;
3918 		(*n)++;
3919 	}
3920 	*map = mval;
3921 }
3922 
3923 /*
3924  * Map file blocks to filesystem blocks without allocation.
3925  */
3926 int
xfs_bmapi_read(struct xfs_inode * ip,xfs_fileoff_t bno,xfs_filblks_t len,struct xfs_bmbt_irec * mval,int * nmap,uint32_t flags)3927 xfs_bmapi_read(
3928 	struct xfs_inode	*ip,
3929 	xfs_fileoff_t		bno,
3930 	xfs_filblks_t		len,
3931 	struct xfs_bmbt_irec	*mval,
3932 	int			*nmap,
3933 	uint32_t		flags)
3934 {
3935 	struct xfs_mount	*mp = ip->i_mount;
3936 	int			whichfork = xfs_bmapi_whichfork(flags);
3937 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
3938 	struct xfs_bmbt_irec	got;
3939 	xfs_fileoff_t		obno;
3940 	xfs_fileoff_t		end;
3941 	struct xfs_iext_cursor	icur;
3942 	int			error;
3943 	bool			eof = false;
3944 	int			n = 0;
3945 
3946 	ASSERT(*nmap >= 1);
3947 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3948 	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3949 
3950 	if (WARN_ON_ONCE(!ifp)) {
3951 		xfs_bmap_mark_sick(ip, whichfork);
3952 		return -EFSCORRUPTED;
3953 	}
3954 
3955 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3956 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
3957 		xfs_bmap_mark_sick(ip, whichfork);
3958 		return -EFSCORRUPTED;
3959 	}
3960 
3961 	if (xfs_is_shutdown(mp))
3962 		return -EIO;
3963 
3964 	XFS_STATS_INC(mp, xs_blk_mapr);
3965 
3966 	error = xfs_iread_extents(NULL, ip, whichfork);
3967 	if (error)
3968 		return error;
3969 
3970 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3971 		eof = true;
3972 	end = bno + len;
3973 	obno = bno;
3974 
3975 	while (bno < end && n < *nmap) {
3976 		/* Reading past eof, act as though there's a hole up to end. */
3977 		if (eof)
3978 			got.br_startoff = end;
3979 		if (got.br_startoff > bno) {
3980 			/* Reading in a hole.  */
3981 			mval->br_startoff = bno;
3982 			mval->br_startblock = HOLESTARTBLOCK;
3983 			mval->br_blockcount =
3984 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3985 			mval->br_state = XFS_EXT_NORM;
3986 			bno += mval->br_blockcount;
3987 			len -= mval->br_blockcount;
3988 			mval++;
3989 			n++;
3990 			continue;
3991 		}
3992 
3993 		/* set up the extent map to return. */
3994 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3995 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3996 
3997 		/* If we're done, stop now. */
3998 		if (bno >= end || n >= *nmap)
3999 			break;
4000 
4001 		/* Else go on to the next record. */
4002 		if (!xfs_iext_next_extent(ifp, &icur, &got))
4003 			eof = true;
4004 	}
4005 	*nmap = n;
4006 	return 0;
4007 }
4008 
4009 /*
4010  * Add a delayed allocation extent to an inode. Blocks are reserved from the
4011  * global pool and the extent inserted into the inode in-core extent tree.
4012  *
4013  * On entry, got refers to the first extent beyond the offset of the extent to
4014  * allocate or eof is specified if no such extent exists. On return, got refers
4015  * to the extent record that was inserted to the inode fork.
4016  *
4017  * Note that the allocated extent may have been merged with contiguous extents
4018  * during insertion into the inode fork. Thus, got does not reflect the current
4019  * state of the inode fork on return. If necessary, the caller can use lastx to
4020  * look up the updated record in the inode fork.
4021  */
4022 int
xfs_bmapi_reserve_delalloc(struct xfs_inode * ip,int whichfork,xfs_fileoff_t off,xfs_filblks_t len,xfs_filblks_t prealloc,struct xfs_bmbt_irec * got,struct xfs_iext_cursor * icur,int eof)4023 xfs_bmapi_reserve_delalloc(
4024 	struct xfs_inode	*ip,
4025 	int			whichfork,
4026 	xfs_fileoff_t		off,
4027 	xfs_filblks_t		len,
4028 	xfs_filblks_t		prealloc,
4029 	struct xfs_bmbt_irec	*got,
4030 	struct xfs_iext_cursor	*icur,
4031 	int			eof)
4032 {
4033 	struct xfs_mount	*mp = ip->i_mount;
4034 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4035 	xfs_extlen_t		alen;
4036 	xfs_extlen_t		indlen;
4037 	uint64_t		fdblocks;
4038 	int			error;
4039 	xfs_fileoff_t		aoff;
4040 	bool			use_cowextszhint =
4041 					whichfork == XFS_COW_FORK && !prealloc;
4042 
4043 retry:
4044 	/*
4045 	 * Cap the alloc length. Keep track of prealloc so we know whether to
4046 	 * tag the inode before we return.
4047 	 */
4048 	aoff = off;
4049 	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
4050 	if (!eof)
4051 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4052 	if (prealloc && alen >= len)
4053 		prealloc = alen - len;
4054 
4055 	/*
4056 	 * If we're targetting the COW fork but aren't creating a speculative
4057 	 * posteof preallocation, try to expand the reservation to align with
4058 	 * the COW extent size hint if there's sufficient free space.
4059 	 *
4060 	 * Unlike the data fork, the CoW cancellation functions will free all
4061 	 * the reservations at inactivation, so we don't require that every
4062 	 * delalloc reservation have a dirty pagecache.
4063 	 */
4064 	if (use_cowextszhint) {
4065 		struct xfs_bmbt_irec	prev;
4066 		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);
4067 
4068 		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4069 			prev.br_startoff = NULLFILEOFF;
4070 
4071 		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4072 					       1, 0, &aoff, &alen);
4073 		ASSERT(!error);
4074 	}
4075 
4076 	/*
4077 	 * Make a transaction-less quota reservation for delayed allocation
4078 	 * blocks.  This number gets adjusted later.  We return if we haven't
4079 	 * allocated blocks already inside this loop.
4080 	 */
4081 	error = xfs_quota_reserve_blkres(ip, alen);
4082 	if (error)
4083 		goto out;
4084 
4085 	/*
4086 	 * Split changing sb for alen and indlen since they could be coming
4087 	 * from different places.
4088 	 */
4089 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4090 	ASSERT(indlen > 0);
4091 
4092 	fdblocks = indlen;
4093 	if (XFS_IS_REALTIME_INODE(ip)) {
4094 		error = xfs_dec_frextents(mp, xfs_rtb_to_rtx(mp, alen));
4095 		if (error)
4096 			goto out_unreserve_quota;
4097 	} else {
4098 		fdblocks += alen;
4099 	}
4100 
4101 	error = xfs_dec_fdblocks(mp, fdblocks, false);
4102 	if (error)
4103 		goto out_unreserve_frextents;
4104 
4105 	ip->i_delayed_blks += alen;
4106 	xfs_mod_delalloc(ip, alen, indlen);
4107 
4108 	got->br_startoff = aoff;
4109 	got->br_startblock = nullstartblock(indlen);
4110 	got->br_blockcount = alen;
4111 	got->br_state = XFS_EXT_NORM;
4112 
4113 	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4114 
4115 	/*
4116 	 * Tag the inode if blocks were preallocated. Note that COW fork
4117 	 * preallocation can occur at the start or end of the extent, even when
4118 	 * prealloc == 0, so we must also check the aligned offset and length.
4119 	 */
4120 	if (whichfork == XFS_DATA_FORK && prealloc)
4121 		xfs_inode_set_eofblocks_tag(ip);
4122 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4123 		xfs_inode_set_cowblocks_tag(ip);
4124 
4125 	return 0;
4126 
4127 out_unreserve_frextents:
4128 	if (XFS_IS_REALTIME_INODE(ip))
4129 		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, alen));
4130 out_unreserve_quota:
4131 	if (XFS_IS_QUOTA_ON(mp))
4132 		xfs_quota_unreserve_blkres(ip, alen);
4133 out:
4134 	if (error == -ENOSPC || error == -EDQUOT) {
4135 		trace_xfs_delalloc_enospc(ip, off, len);
4136 
4137 		if (prealloc || use_cowextszhint) {
4138 			/* retry without any preallocation */
4139 			use_cowextszhint = false;
4140 			prealloc = 0;
4141 			goto retry;
4142 		}
4143 	}
4144 	return error;
4145 }
4146 
4147 static int
xfs_bmapi_allocate(struct xfs_bmalloca * bma)4148 xfs_bmapi_allocate(
4149 	struct xfs_bmalloca	*bma)
4150 {
4151 	struct xfs_mount	*mp = bma->ip->i_mount;
4152 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4153 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4154 	int			error;
4155 
4156 	ASSERT(bma->length > 0);
4157 	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
4158 
4159 	if (bma->flags & XFS_BMAPI_CONTIG)
4160 		bma->minlen = bma->length;
4161 	else
4162 		bma->minlen = 1;
4163 
4164 	if (!(bma->flags & XFS_BMAPI_METADATA)) {
4165 		/*
4166 		 * For the data and COW fork, the first data in the file is
4167 		 * treated differently to all other allocations. For the
4168 		 * attribute fork, we only need to ensure the allocated range
4169 		 * is not on the busy list.
4170 		 */
4171 		bma->datatype = XFS_ALLOC_NOBUSY;
4172 		if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4173 			bma->datatype |= XFS_ALLOC_USERDATA;
4174 			if (bma->offset == 0)
4175 				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4176 
4177 			if (mp->m_dalign && bma->length >= mp->m_dalign) {
4178 				error = xfs_bmap_isaeof(bma, whichfork);
4179 				if (error)
4180 					return error;
4181 			}
4182 		}
4183 	}
4184 
4185 	if ((bma->datatype & XFS_ALLOC_USERDATA) &&
4186 	    XFS_IS_REALTIME_INODE(bma->ip))
4187 		error = xfs_bmap_rtalloc(bma);
4188 	else
4189 		error = xfs_bmap_btalloc(bma);
4190 	if (error)
4191 		return error;
4192 	if (bma->blkno == NULLFSBLOCK)
4193 		return -ENOSPC;
4194 
4195 	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
4196 		xfs_bmap_mark_sick(bma->ip, whichfork);
4197 		return -EFSCORRUPTED;
4198 	}
4199 
4200 	if (bma->flags & XFS_BMAPI_ZERO) {
4201 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4202 		if (error)
4203 			return error;
4204 	}
4205 
4206 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4207 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4208 	/*
4209 	 * Bump the number of extents we've allocated
4210 	 * in this call.
4211 	 */
4212 	bma->nallocs++;
4213 
4214 	if (bma->cur && bma->wasdel)
4215 		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
4216 
4217 	bma->got.br_startoff = bma->offset;
4218 	bma->got.br_startblock = bma->blkno;
4219 	bma->got.br_blockcount = bma->length;
4220 	bma->got.br_state = XFS_EXT_NORM;
4221 
4222 	if (bma->flags & XFS_BMAPI_PREALLOC)
4223 		bma->got.br_state = XFS_EXT_UNWRITTEN;
4224 
4225 	if (bma->wasdel)
4226 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4227 	else
4228 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4229 				whichfork, &bma->icur, &bma->cur, &bma->got,
4230 				&bma->logflags, bma->flags);
4231 	if (error)
4232 		return error;
4233 
4234 	/*
4235 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4236 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4237 	 * the neighbouring ones.
4238 	 */
4239 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4240 
4241 	ASSERT(bma->got.br_startoff <= bma->offset);
4242 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4243 	       bma->offset + bma->length);
4244 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4245 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
4246 	return 0;
4247 }
4248 
4249 STATIC int
xfs_bmapi_convert_unwritten(struct xfs_bmalloca * bma,struct xfs_bmbt_irec * mval,xfs_filblks_t len,uint32_t flags)4250 xfs_bmapi_convert_unwritten(
4251 	struct xfs_bmalloca	*bma,
4252 	struct xfs_bmbt_irec	*mval,
4253 	xfs_filblks_t		len,
4254 	uint32_t		flags)
4255 {
4256 	int			whichfork = xfs_bmapi_whichfork(flags);
4257 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4258 	int			tmp_logflags = 0;
4259 	int			error;
4260 
4261 	/* check if we need to do unwritten->real conversion */
4262 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
4263 	    (flags & XFS_BMAPI_PREALLOC))
4264 		return 0;
4265 
4266 	/* check if we need to do real->unwritten conversion */
4267 	if (mval->br_state == XFS_EXT_NORM &&
4268 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4269 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4270 		return 0;
4271 
4272 	/*
4273 	 * Modify (by adding) the state flag, if writing.
4274 	 */
4275 	ASSERT(mval->br_blockcount <= len);
4276 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4277 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4278 					bma->ip, whichfork);
4279 	}
4280 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4281 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4282 
4283 	/*
4284 	 * Before insertion into the bmbt, zero the range being converted
4285 	 * if required.
4286 	 */
4287 	if (flags & XFS_BMAPI_ZERO) {
4288 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
4289 					mval->br_blockcount);
4290 		if (error)
4291 			return error;
4292 	}
4293 
4294 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4295 			&bma->icur, &bma->cur, mval, &tmp_logflags);
4296 	/*
4297 	 * Log the inode core unconditionally in the unwritten extent conversion
4298 	 * path because the conversion might not have done so (e.g., if the
4299 	 * extent count hasn't changed). We need to make sure the inode is dirty
4300 	 * in the transaction for the sake of fsync(), even if nothing has
4301 	 * changed, because fsync() will not force the log for this transaction
4302 	 * unless it sees the inode pinned.
4303 	 *
4304 	 * Note: If we're only converting cow fork extents, there aren't
4305 	 * any on-disk updates to make, so we don't need to log anything.
4306 	 */
4307 	if (whichfork != XFS_COW_FORK)
4308 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4309 	if (error)
4310 		return error;
4311 
4312 	/*
4313 	 * Update our extent pointer, given that
4314 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4315 	 * of the neighbouring ones.
4316 	 */
4317 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4318 
4319 	/*
4320 	 * We may have combined previously unwritten space with written space,
4321 	 * so generate another request.
4322 	 */
4323 	if (mval->br_blockcount < len)
4324 		return -EAGAIN;
4325 	return 0;
4326 }
4327 
4328 xfs_extlen_t
xfs_bmapi_minleft(struct xfs_trans * tp,struct xfs_inode * ip,int fork)4329 xfs_bmapi_minleft(
4330 	struct xfs_trans	*tp,
4331 	struct xfs_inode	*ip,
4332 	int			fork)
4333 {
4334 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);
4335 
4336 	if (tp && tp->t_highest_agno != NULLAGNUMBER)
4337 		return 0;
4338 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4339 		return 1;
4340 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4341 }
4342 
4343 /*
4344  * Log whatever the flags say, even if error.  Otherwise we might miss detecting
4345  * a case where the data is changed, there's an error, and it's not logged so we
4346  * don't shutdown when we should.  Don't bother logging extents/btree changes if
4347  * we converted to the other format.
4348  */
4349 static void
xfs_bmapi_finish(struct xfs_bmalloca * bma,int whichfork,int error)4350 xfs_bmapi_finish(
4351 	struct xfs_bmalloca	*bma,
4352 	int			whichfork,
4353 	int			error)
4354 {
4355 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4356 
4357 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4358 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4359 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4360 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4361 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4362 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4363 
4364 	if (bma->logflags)
4365 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4366 	if (bma->cur)
4367 		xfs_btree_del_cursor(bma->cur, error);
4368 }
4369 
4370 /*
4371  * Map file blocks to filesystem blocks, and allocate blocks or convert the
4372  * extent state if necessary.  Details behaviour is controlled by the flags
4373  * parameter.  Only allocates blocks from a single allocation group, to avoid
4374  * locking problems.
4375  *
4376  * Returns 0 on success and places the extent mappings in mval.  nmaps is used
4377  * as an input/output parameter where the caller specifies the maximum number
4378  * of mappings that may be returned and xfs_bmapi_write passes back the number
4379  * of mappings (including existing mappings) it found.
4380  *
4381  * Returns a negative error code on failure, including -ENOSPC when it could not
4382  * allocate any blocks and -ENOSR when it did allocate blocks to convert a
4383  * delalloc range, but those blocks were before the passed in range.
4384  */
4385 int
xfs_bmapi_write(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t bno,xfs_filblks_t len,uint32_t flags,xfs_extlen_t total,struct xfs_bmbt_irec * mval,int * nmap)4386 xfs_bmapi_write(
4387 	struct xfs_trans	*tp,		/* transaction pointer */
4388 	struct xfs_inode	*ip,		/* incore inode */
4389 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
4390 	xfs_filblks_t		len,		/* length to map in file */
4391 	uint32_t		flags,		/* XFS_BMAPI_... */
4392 	xfs_extlen_t		total,		/* total blocks needed */
4393 	struct xfs_bmbt_irec	*mval,		/* output: map values */
4394 	int			*nmap)		/* i/o: mval size/count */
4395 {
4396 	struct xfs_bmalloca	bma = {
4397 		.tp		= tp,
4398 		.ip		= ip,
4399 		.total		= total,
4400 	};
4401 	struct xfs_mount	*mp = ip->i_mount;
4402 	int			whichfork = xfs_bmapi_whichfork(flags);
4403 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4404 	xfs_fileoff_t		end;		/* end of mapped file region */
4405 	bool			eof = false;	/* after the end of extents */
4406 	int			error;		/* error return */
4407 	int			n;		/* current extent index */
4408 	xfs_fileoff_t		obno;		/* old block number (offset) */
4409 
4410 #ifdef DEBUG
4411 	xfs_fileoff_t		orig_bno;	/* original block number value */
4412 	int			orig_flags;	/* original flags arg value */
4413 	xfs_filblks_t		orig_len;	/* original value of len arg */
4414 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
4415 	int			orig_nmap;	/* original value of *nmap */
4416 
4417 	orig_bno = bno;
4418 	orig_len = len;
4419 	orig_flags = flags;
4420 	orig_mval = mval;
4421 	orig_nmap = *nmap;
4422 #endif
4423 
4424 	ASSERT(*nmap >= 1);
4425 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4426 	ASSERT(tp != NULL);
4427 	ASSERT(len > 0);
4428 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4429 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4430 	ASSERT(!(flags & XFS_BMAPI_REMAP));
4431 
4432 	/* zeroing is for currently only for data extents, not metadata */
4433 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4434 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4435 	/*
4436 	 * we can allocate unwritten extents or pre-zero allocated blocks,
4437 	 * but it makes no sense to do both at once. This would result in
4438 	 * zeroing the unwritten extent twice, but it still being an
4439 	 * unwritten extent....
4440 	 */
4441 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4442 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4443 
4444 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4445 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4446 		xfs_bmap_mark_sick(ip, whichfork);
4447 		return -EFSCORRUPTED;
4448 	}
4449 
4450 	if (xfs_is_shutdown(mp))
4451 		return -EIO;
4452 
4453 	XFS_STATS_INC(mp, xs_blk_mapw);
4454 
4455 	error = xfs_iread_extents(tp, ip, whichfork);
4456 	if (error)
4457 		goto error0;
4458 
4459 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4460 		eof = true;
4461 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4462 		bma.prev.br_startoff = NULLFILEOFF;
4463 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4464 
4465 	n = 0;
4466 	end = bno + len;
4467 	obno = bno;
4468 	while (bno < end && n < *nmap) {
4469 		bool			need_alloc = false, wasdelay = false;
4470 
4471 		/* in hole or beyond EOF? */
4472 		if (eof || bma.got.br_startoff > bno) {
4473 			/*
4474 			 * CoW fork conversions should /never/ hit EOF or
4475 			 * holes.  There should always be something for us
4476 			 * to work on.
4477 			 */
4478 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4479 			         (flags & XFS_BMAPI_COWFORK)));
4480 
4481 			need_alloc = true;
4482 		} else if (isnullstartblock(bma.got.br_startblock)) {
4483 			wasdelay = true;
4484 		}
4485 
4486 		/*
4487 		 * First, deal with the hole before the allocated space
4488 		 * that we found, if any.
4489 		 */
4490 		if (need_alloc || wasdelay) {
4491 			bma.eof = eof;
4492 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4493 			bma.wasdel = wasdelay;
4494 			bma.offset = bno;
4495 			bma.flags = flags;
4496 
4497 			/*
4498 			 * There's a 32/64 bit type mismatch between the
4499 			 * allocation length request (which can be 64 bits in
4500 			 * length) and the bma length request, which is
4501 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
4502 			 * be careful and do the min() using the larger type to
4503 			 * avoid overflows.
4504 			 */
4505 			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
4506 
4507 			if (wasdelay) {
4508 				bma.length = XFS_FILBLKS_MIN(bma.length,
4509 					bma.got.br_blockcount -
4510 					(bno - bma.got.br_startoff));
4511 			} else {
4512 				if (!eof)
4513 					bma.length = XFS_FILBLKS_MIN(bma.length,
4514 						bma.got.br_startoff - bno);
4515 			}
4516 
4517 			ASSERT(bma.length > 0);
4518 			error = xfs_bmapi_allocate(&bma);
4519 			if (error) {
4520 				/*
4521 				 * If we already allocated space in a previous
4522 				 * iteration return what we go so far when
4523 				 * running out of space.
4524 				 */
4525 				if (error == -ENOSPC && bma.nallocs)
4526 					break;
4527 				goto error0;
4528 			}
4529 
4530 			/*
4531 			 * If this is a CoW allocation, record the data in
4532 			 * the refcount btree for orphan recovery.
4533 			 */
4534 			if (whichfork == XFS_COW_FORK)
4535 				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4536 						bma.length);
4537 		}
4538 
4539 		/* Deal with the allocated space we found.  */
4540 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4541 							end, n, flags);
4542 
4543 		/* Execute unwritten extent conversion if necessary */
4544 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4545 		if (error == -EAGAIN)
4546 			continue;
4547 		if (error)
4548 			goto error0;
4549 
4550 		/* update the extent map to return */
4551 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4552 
4553 		/*
4554 		 * If we're done, stop now.  Stop when we've allocated
4555 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
4556 		 * the transaction may get too big.
4557 		 */
4558 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4559 			break;
4560 
4561 		/* Else go on to the next record. */
4562 		bma.prev = bma.got;
4563 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4564 			eof = true;
4565 	}
4566 
4567 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4568 			whichfork);
4569 	if (error)
4570 		goto error0;
4571 
4572 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4573 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4574 	xfs_bmapi_finish(&bma, whichfork, 0);
4575 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4576 		orig_nmap, n);
4577 
4578 	/*
4579 	 * When converting delayed allocations, xfs_bmapi_allocate ignores
4580 	 * the passed in bno and always converts from the start of the found
4581 	 * delalloc extent.
4582 	 *
4583 	 * To avoid a successful return with *nmap set to 0, return the magic
4584 	 * -ENOSR error code for this particular case so that the caller can
4585 	 * handle it.
4586 	 */
4587 	if (!n) {
4588 		ASSERT(bma.nallocs >= *nmap);
4589 		return -ENOSR;
4590 	}
4591 	*nmap = n;
4592 	return 0;
4593 error0:
4594 	xfs_bmapi_finish(&bma, whichfork, error);
4595 	return error;
4596 }
4597 
4598 /*
4599  * Convert an existing delalloc extent to real blocks based on file offset. This
4600  * attempts to allocate the entire delalloc extent and may require multiple
4601  * invocations to allocate the target offset if a large enough physical extent
4602  * is not available.
4603  */
4604 static int
xfs_bmapi_convert_one_delalloc(struct xfs_inode * ip,int whichfork,xfs_off_t offset,struct iomap * iomap,unsigned int * seq)4605 xfs_bmapi_convert_one_delalloc(
4606 	struct xfs_inode	*ip,
4607 	int			whichfork,
4608 	xfs_off_t		offset,
4609 	struct iomap		*iomap,
4610 	unsigned int		*seq)
4611 {
4612 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4613 	struct xfs_mount	*mp = ip->i_mount;
4614 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4615 	struct xfs_bmalloca	bma = { NULL };
4616 	uint16_t		flags = 0;
4617 	struct xfs_trans	*tp;
4618 	int			error;
4619 
4620 	if (whichfork == XFS_COW_FORK)
4621 		flags |= IOMAP_F_SHARED;
4622 
4623 	/*
4624 	 * Space for the extent and indirect blocks was reserved when the
4625 	 * delalloc extent was created so there's no need to do so here.
4626 	 */
4627 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4628 				XFS_TRANS_RESERVE, &tp);
4629 	if (error)
4630 		return error;
4631 
4632 	xfs_ilock(ip, XFS_ILOCK_EXCL);
4633 	xfs_trans_ijoin(tp, ip, 0);
4634 
4635 	error = xfs_iext_count_extend(tp, ip, whichfork,
4636 			XFS_IEXT_ADD_NOSPLIT_CNT);
4637 	if (error)
4638 		goto out_trans_cancel;
4639 
4640 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4641 	    bma.got.br_startoff > offset_fsb) {
4642 		/*
4643 		 * No extent found in the range we are trying to convert.  This
4644 		 * should only happen for the COW fork, where another thread
4645 		 * might have moved the extent to the data fork in the meantime.
4646 		 */
4647 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4648 		error = -EAGAIN;
4649 		goto out_trans_cancel;
4650 	}
4651 
4652 	/*
4653 	 * If we find a real extent here we raced with another thread converting
4654 	 * the extent.  Just return the real extent at this offset.
4655 	 */
4656 	if (!isnullstartblock(bma.got.br_startblock)) {
4657 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4658 				xfs_iomap_inode_sequence(ip, flags));
4659 		if (seq)
4660 			*seq = READ_ONCE(ifp->if_seq);
4661 		goto out_trans_cancel;
4662 	}
4663 
4664 	bma.tp = tp;
4665 	bma.ip = ip;
4666 	bma.wasdel = true;
4667 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4668 
4669 	/*
4670 	 * Always allocate convert from the start of the delalloc extent even if
4671 	 * that is outside the passed in range to create large contiguous
4672 	 * extents on disk.
4673 	 */
4674 	bma.offset = bma.got.br_startoff;
4675 	bma.length = bma.got.br_blockcount;
4676 
4677 	/*
4678 	 * When we're converting the delalloc reservations backing dirty pages
4679 	 * in the page cache, we must be careful about how we create the new
4680 	 * extents:
4681 	 *
4682 	 * New CoW fork extents are created unwritten, turned into real extents
4683 	 * when we're about to write the data to disk, and mapped into the data
4684 	 * fork after the write finishes.  End of story.
4685 	 *
4686 	 * New data fork extents must be mapped in as unwritten and converted
4687 	 * to real extents after the write succeeds to avoid exposing stale
4688 	 * disk contents if we crash.
4689 	 */
4690 	bma.flags = XFS_BMAPI_PREALLOC;
4691 	if (whichfork == XFS_COW_FORK)
4692 		bma.flags |= XFS_BMAPI_COWFORK;
4693 
4694 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4695 		bma.prev.br_startoff = NULLFILEOFF;
4696 
4697 	error = xfs_bmapi_allocate(&bma);
4698 	if (error)
4699 		goto out_finish;
4700 
4701 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4702 	XFS_STATS_INC(mp, xs_xstrat_quick);
4703 
4704 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4705 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4706 				xfs_iomap_inode_sequence(ip, flags));
4707 	if (seq)
4708 		*seq = READ_ONCE(ifp->if_seq);
4709 
4710 	if (whichfork == XFS_COW_FORK)
4711 		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4712 
4713 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4714 			whichfork);
4715 	if (error)
4716 		goto out_finish;
4717 
4718 	xfs_bmapi_finish(&bma, whichfork, 0);
4719 	error = xfs_trans_commit(tp);
4720 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4721 	return error;
4722 
4723 out_finish:
4724 	xfs_bmapi_finish(&bma, whichfork, error);
4725 out_trans_cancel:
4726 	xfs_trans_cancel(tp);
4727 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4728 	return error;
4729 }
4730 
4731 /*
4732  * Pass in a dellalloc extent and convert it to real extents, return the real
4733  * extent that maps offset_fsb in iomap.
4734  */
4735 int
xfs_bmapi_convert_delalloc(struct xfs_inode * ip,int whichfork,loff_t offset,struct iomap * iomap,unsigned int * seq)4736 xfs_bmapi_convert_delalloc(
4737 	struct xfs_inode	*ip,
4738 	int			whichfork,
4739 	loff_t			offset,
4740 	struct iomap		*iomap,
4741 	unsigned int		*seq)
4742 {
4743 	int			error;
4744 
4745 	/*
4746 	 * Attempt to allocate whatever delalloc extent currently backs offset
4747 	 * and put the result into iomap.  Allocate in a loop because it may
4748 	 * take several attempts to allocate real blocks for a contiguous
4749 	 * delalloc extent if free space is sufficiently fragmented.
4750 	 */
4751 	do {
4752 		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
4753 					iomap, seq);
4754 		if (error)
4755 			return error;
4756 	} while (iomap->offset + iomap->length <= offset);
4757 
4758 	return 0;
4759 }
4760 
4761 int
xfs_bmapi_remap(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t bno,xfs_filblks_t len,xfs_fsblock_t startblock,uint32_t flags)4762 xfs_bmapi_remap(
4763 	struct xfs_trans	*tp,
4764 	struct xfs_inode	*ip,
4765 	xfs_fileoff_t		bno,
4766 	xfs_filblks_t		len,
4767 	xfs_fsblock_t		startblock,
4768 	uint32_t		flags)
4769 {
4770 	struct xfs_mount	*mp = ip->i_mount;
4771 	struct xfs_ifork	*ifp;
4772 	struct xfs_btree_cur	*cur = NULL;
4773 	struct xfs_bmbt_irec	got;
4774 	struct xfs_iext_cursor	icur;
4775 	int			whichfork = xfs_bmapi_whichfork(flags);
4776 	int			logflags = 0, error;
4777 
4778 	ifp = xfs_ifork_ptr(ip, whichfork);
4779 	ASSERT(len > 0);
4780 	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4781 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4782 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4783 			   XFS_BMAPI_NORMAP)));
4784 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4785 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4786 
4787 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4788 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4789 		xfs_bmap_mark_sick(ip, whichfork);
4790 		return -EFSCORRUPTED;
4791 	}
4792 
4793 	if (xfs_is_shutdown(mp))
4794 		return -EIO;
4795 
4796 	error = xfs_iread_extents(tp, ip, whichfork);
4797 	if (error)
4798 		return error;
4799 
4800 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4801 		/* make sure we only reflink into a hole. */
4802 		ASSERT(got.br_startoff > bno);
4803 		ASSERT(got.br_startoff - bno >= len);
4804 	}
4805 
4806 	ip->i_nblocks += len;
4807 	ip->i_delayed_blks -= len; /* see xfs_bmap_defer_add */
4808 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4809 
4810 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
4811 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4812 
4813 	got.br_startoff = bno;
4814 	got.br_startblock = startblock;
4815 	got.br_blockcount = len;
4816 	if (flags & XFS_BMAPI_PREALLOC)
4817 		got.br_state = XFS_EXT_UNWRITTEN;
4818 	else
4819 		got.br_state = XFS_EXT_NORM;
4820 
4821 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4822 			&cur, &got, &logflags, flags);
4823 	if (error)
4824 		goto error0;
4825 
4826 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4827 
4828 error0:
4829 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4830 		logflags &= ~XFS_ILOG_DEXT;
4831 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4832 		logflags &= ~XFS_ILOG_DBROOT;
4833 
4834 	if (logflags)
4835 		xfs_trans_log_inode(tp, ip, logflags);
4836 	if (cur)
4837 		xfs_btree_del_cursor(cur, error);
4838 	return error;
4839 }
4840 
4841 /*
4842  * When a delalloc extent is split (e.g., due to a hole punch), the original
4843  * indlen reservation must be shared across the two new extents that are left
4844  * behind.
4845  *
4846  * Given the original reservation and the worst case indlen for the two new
4847  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4848  * reservation fairly across the two new extents. If necessary, steal available
4849  * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4850  * ores == 1). The number of stolen blocks is returned. The availability and
4851  * subsequent accounting of stolen blocks is the responsibility of the caller.
4852  */
4853 static void
xfs_bmap_split_indlen(xfs_filblks_t ores,xfs_filblks_t * indlen1,xfs_filblks_t * indlen2)4854 xfs_bmap_split_indlen(
4855 	xfs_filblks_t			ores,		/* original res. */
4856 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4857 	xfs_filblks_t			*indlen2)	/* ext2 worst indlen */
4858 {
4859 	xfs_filblks_t			len1 = *indlen1;
4860 	xfs_filblks_t			len2 = *indlen2;
4861 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4862 	xfs_filblks_t			resfactor;
4863 
4864 	/*
4865 	 * We can't meet the total required reservation for the two extents.
4866 	 * Calculate the percent of the overall shortage between both extents
4867 	 * and apply this percentage to each of the requested indlen values.
4868 	 * This distributes the shortage fairly and reduces the chances that one
4869 	 * of the two extents is left with nothing when extents are repeatedly
4870 	 * split.
4871 	 */
4872 	resfactor = (ores * 100);
4873 	do_div(resfactor, nres);
4874 	len1 *= resfactor;
4875 	do_div(len1, 100);
4876 	len2 *= resfactor;
4877 	do_div(len2, 100);
4878 	ASSERT(len1 + len2 <= ores);
4879 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
4880 
4881 	/*
4882 	 * Hand out the remainder to each extent. If one of the two reservations
4883 	 * is zero, we want to make sure that one gets a block first. The loop
4884 	 * below starts with len1, so hand len2 a block right off the bat if it
4885 	 * is zero.
4886 	 */
4887 	ores -= (len1 + len2);
4888 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4889 	if (ores && !len2 && *indlen2) {
4890 		len2++;
4891 		ores--;
4892 	}
4893 	while (ores) {
4894 		if (len1 < *indlen1) {
4895 			len1++;
4896 			ores--;
4897 		}
4898 		if (!ores)
4899 			break;
4900 		if (len2 < *indlen2) {
4901 			len2++;
4902 			ores--;
4903 		}
4904 	}
4905 
4906 	*indlen1 = len1;
4907 	*indlen2 = len2;
4908 }
4909 
4910 void
xfs_bmap_del_extent_delay(struct xfs_inode * ip,int whichfork,struct xfs_iext_cursor * icur,struct xfs_bmbt_irec * got,struct xfs_bmbt_irec * del)4911 xfs_bmap_del_extent_delay(
4912 	struct xfs_inode	*ip,
4913 	int			whichfork,
4914 	struct xfs_iext_cursor	*icur,
4915 	struct xfs_bmbt_irec	*got,
4916 	struct xfs_bmbt_irec	*del)
4917 {
4918 	struct xfs_mount	*mp = ip->i_mount;
4919 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4920 	struct xfs_bmbt_irec	new;
4921 	int64_t			da_old, da_new, da_diff = 0;
4922 	xfs_fileoff_t		del_endoff, got_endoff;
4923 	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
4924 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4925 	uint64_t		fdblocks;
4926 	bool			isrt;
4927 
4928 	XFS_STATS_INC(mp, xs_del_exlist);
4929 
4930 	isrt = xfs_ifork_is_realtime(ip, whichfork);
4931 	del_endoff = del->br_startoff + del->br_blockcount;
4932 	got_endoff = got->br_startoff + got->br_blockcount;
4933 	da_old = startblockval(got->br_startblock);
4934 	da_new = 0;
4935 
4936 	ASSERT(del->br_blockcount > 0);
4937 	ASSERT(got->br_startoff <= del->br_startoff);
4938 	ASSERT(got_endoff >= del_endoff);
4939 
4940 	/*
4941 	 * Update the inode delalloc counter now and wait to update the
4942 	 * sb counters as we might have to borrow some blocks for the
4943 	 * indirect block accounting.
4944 	 */
4945 	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4946 	ip->i_delayed_blks -= del->br_blockcount;
4947 
4948 	if (got->br_startoff == del->br_startoff)
4949 		state |= BMAP_LEFT_FILLING;
4950 	if (got_endoff == del_endoff)
4951 		state |= BMAP_RIGHT_FILLING;
4952 
4953 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4954 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4955 		/*
4956 		 * Matches the whole extent.  Delete the entry.
4957 		 */
4958 		xfs_iext_remove(ip, icur, state);
4959 		xfs_iext_prev(ifp, icur);
4960 		break;
4961 	case BMAP_LEFT_FILLING:
4962 		/*
4963 		 * Deleting the first part of the extent.
4964 		 */
4965 		got->br_startoff = del_endoff;
4966 		got->br_blockcount -= del->br_blockcount;
4967 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4968 				got->br_blockcount), da_old);
4969 		got->br_startblock = nullstartblock((int)da_new);
4970 		xfs_iext_update_extent(ip, state, icur, got);
4971 		break;
4972 	case BMAP_RIGHT_FILLING:
4973 		/*
4974 		 * Deleting the last part of the extent.
4975 		 */
4976 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
4977 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4978 				got->br_blockcount), da_old);
4979 		got->br_startblock = nullstartblock((int)da_new);
4980 		xfs_iext_update_extent(ip, state, icur, got);
4981 		break;
4982 	case 0:
4983 		/*
4984 		 * Deleting the middle of the extent.
4985 		 *
4986 		 * Distribute the original indlen reservation across the two new
4987 		 * extents.  Steal blocks from the deleted extent if necessary.
4988 		 * Stealing blocks simply fudges the fdblocks accounting below.
4989 		 * Warn if either of the new indlen reservations is zero as this
4990 		 * can lead to delalloc problems.
4991 		 */
4992 		got->br_blockcount = del->br_startoff - got->br_startoff;
4993 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4994 
4995 		new.br_blockcount = got_endoff - del_endoff;
4996 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4997 
4998 		WARN_ON_ONCE(!got_indlen || !new_indlen);
4999 		/*
5000 		 * Steal as many blocks as we can to try and satisfy the worst
5001 		 * case indlen for both new extents.
5002 		 *
5003 		 * However, we can't just steal reservations from the data
5004 		 * blocks if this is an RT inodes as the data and metadata
5005 		 * blocks come from different pools.  We'll have to live with
5006 		 * under-filled indirect reservation in this case.
5007 		 */
5008 		da_new = got_indlen + new_indlen;
5009 		if (da_new > da_old && !isrt) {
5010 			stolen = XFS_FILBLKS_MIN(da_new - da_old,
5011 						 del->br_blockcount);
5012 			da_old += stolen;
5013 		}
5014 		if (da_new > da_old)
5015 			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
5016 		da_new = got_indlen + new_indlen;
5017 
5018 		got->br_startblock = nullstartblock((int)got_indlen);
5019 
5020 		new.br_startoff = del_endoff;
5021 		new.br_state = got->br_state;
5022 		new.br_startblock = nullstartblock((int)new_indlen);
5023 
5024 		xfs_iext_update_extent(ip, state, icur, got);
5025 		xfs_iext_next(ifp, icur);
5026 		xfs_iext_insert(ip, icur, &new, state);
5027 
5028 		del->br_blockcount -= stolen;
5029 		break;
5030 	}
5031 
5032 	ASSERT(da_old >= da_new);
5033 	da_diff = da_old - da_new;
5034 	fdblocks = da_diff;
5035 
5036 	if (isrt)
5037 		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
5038 	else
5039 		fdblocks += del->br_blockcount;
5040 
5041 	xfs_add_fdblocks(mp, fdblocks);
5042 	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
5043 }
5044 
5045 void
xfs_bmap_del_extent_cow(struct xfs_inode * ip,struct xfs_iext_cursor * icur,struct xfs_bmbt_irec * got,struct xfs_bmbt_irec * del)5046 xfs_bmap_del_extent_cow(
5047 	struct xfs_inode	*ip,
5048 	struct xfs_iext_cursor	*icur,
5049 	struct xfs_bmbt_irec	*got,
5050 	struct xfs_bmbt_irec	*del)
5051 {
5052 	struct xfs_mount	*mp = ip->i_mount;
5053 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
5054 	struct xfs_bmbt_irec	new;
5055 	xfs_fileoff_t		del_endoff, got_endoff;
5056 	uint32_t		state = BMAP_COWFORK;
5057 
5058 	XFS_STATS_INC(mp, xs_del_exlist);
5059 
5060 	del_endoff = del->br_startoff + del->br_blockcount;
5061 	got_endoff = got->br_startoff + got->br_blockcount;
5062 
5063 	ASSERT(del->br_blockcount > 0);
5064 	ASSERT(got->br_startoff <= del->br_startoff);
5065 	ASSERT(got_endoff >= del_endoff);
5066 	ASSERT(!isnullstartblock(got->br_startblock));
5067 
5068 	if (got->br_startoff == del->br_startoff)
5069 		state |= BMAP_LEFT_FILLING;
5070 	if (got_endoff == del_endoff)
5071 		state |= BMAP_RIGHT_FILLING;
5072 
5073 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5074 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5075 		/*
5076 		 * Matches the whole extent.  Delete the entry.
5077 		 */
5078 		xfs_iext_remove(ip, icur, state);
5079 		xfs_iext_prev(ifp, icur);
5080 		break;
5081 	case BMAP_LEFT_FILLING:
5082 		/*
5083 		 * Deleting the first part of the extent.
5084 		 */
5085 		got->br_startoff = del_endoff;
5086 		got->br_blockcount -= del->br_blockcount;
5087 		got->br_startblock = del->br_startblock + del->br_blockcount;
5088 		xfs_iext_update_extent(ip, state, icur, got);
5089 		break;
5090 	case BMAP_RIGHT_FILLING:
5091 		/*
5092 		 * Deleting the last part of the extent.
5093 		 */
5094 		got->br_blockcount -= del->br_blockcount;
5095 		xfs_iext_update_extent(ip, state, icur, got);
5096 		break;
5097 	case 0:
5098 		/*
5099 		 * Deleting the middle of the extent.
5100 		 */
5101 		got->br_blockcount = del->br_startoff - got->br_startoff;
5102 
5103 		new.br_startoff = del_endoff;
5104 		new.br_blockcount = got_endoff - del_endoff;
5105 		new.br_state = got->br_state;
5106 		new.br_startblock = del->br_startblock + del->br_blockcount;
5107 
5108 		xfs_iext_update_extent(ip, state, icur, got);
5109 		xfs_iext_next(ifp, icur);
5110 		xfs_iext_insert(ip, icur, &new, state);
5111 		break;
5112 	}
5113 	ip->i_delayed_blks -= del->br_blockcount;
5114 }
5115 
5116 /*
5117  * Called by xfs_bmapi to update file extent records and the btree
5118  * after removing space.
5119  */
5120 STATIC int				/* error */
xfs_bmap_del_extent_real(xfs_inode_t * ip,xfs_trans_t * tp,struct xfs_iext_cursor * icur,struct xfs_btree_cur * cur,xfs_bmbt_irec_t * del,int * logflagsp,int whichfork,uint32_t bflags)5121 xfs_bmap_del_extent_real(
5122 	xfs_inode_t		*ip,	/* incore inode pointer */
5123 	xfs_trans_t		*tp,	/* current transaction pointer */
5124 	struct xfs_iext_cursor	*icur,
5125 	struct xfs_btree_cur	*cur,	/* if null, not a btree */
5126 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
5127 	int			*logflagsp, /* inode logging flags */
5128 	int			whichfork, /* data or attr fork */
5129 	uint32_t		bflags)	/* bmapi flags */
5130 {
5131 	xfs_fsblock_t		del_endblock=0;	/* first block past del */
5132 	xfs_fileoff_t		del_endoff;	/* first offset past del */
5133 	int			error = 0;	/* error return value */
5134 	struct xfs_bmbt_irec	got;	/* current extent entry */
5135 	xfs_fileoff_t		got_endoff;	/* first offset past got */
5136 	int			i;	/* temp state */
5137 	struct xfs_ifork	*ifp;	/* inode fork pointer */
5138 	xfs_mount_t		*mp;	/* mount structure */
5139 	xfs_filblks_t		nblks;	/* quota/sb block count */
5140 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
5141 	/* REFERENCED */
5142 	uint			qfield;	/* quota field to update */
5143 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
5144 	struct xfs_bmbt_irec	old;
5145 
5146 	*logflagsp = 0;
5147 
5148 	mp = ip->i_mount;
5149 	XFS_STATS_INC(mp, xs_del_exlist);
5150 
5151 	ifp = xfs_ifork_ptr(ip, whichfork);
5152 	ASSERT(del->br_blockcount > 0);
5153 	xfs_iext_get_extent(ifp, icur, &got);
5154 	ASSERT(got.br_startoff <= del->br_startoff);
5155 	del_endoff = del->br_startoff + del->br_blockcount;
5156 	got_endoff = got.br_startoff + got.br_blockcount;
5157 	ASSERT(got_endoff >= del_endoff);
5158 	ASSERT(!isnullstartblock(got.br_startblock));
5159 	qfield = 0;
5160 
5161 	/*
5162 	 * If it's the case where the directory code is running with no block
5163 	 * reservation, and the deleted block is in the middle of its extent,
5164 	 * and the resulting insert of an extent would cause transformation to
5165 	 * btree format, then reject it.  The calling code will then swap blocks
5166 	 * around instead.  We have to do this now, rather than waiting for the
5167 	 * conversion to btree format, since the transaction will be dirty then.
5168 	 */
5169 	if (tp->t_blk_res == 0 &&
5170 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5171 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5172 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5173 		return -ENOSPC;
5174 
5175 	*logflagsp = XFS_ILOG_CORE;
5176 	if (xfs_ifork_is_realtime(ip, whichfork))
5177 		qfield = XFS_TRANS_DQ_RTBCOUNT;
5178 	else
5179 		qfield = XFS_TRANS_DQ_BCOUNT;
5180 	nblks = del->br_blockcount;
5181 
5182 	del_endblock = del->br_startblock + del->br_blockcount;
5183 	if (cur) {
5184 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5185 		if (error)
5186 			return error;
5187 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5188 			xfs_btree_mark_sick(cur);
5189 			return -EFSCORRUPTED;
5190 		}
5191 	}
5192 
5193 	if (got.br_startoff == del->br_startoff)
5194 		state |= BMAP_LEFT_FILLING;
5195 	if (got_endoff == del_endoff)
5196 		state |= BMAP_RIGHT_FILLING;
5197 
5198 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5199 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5200 		/*
5201 		 * Matches the whole extent.  Delete the entry.
5202 		 */
5203 		xfs_iext_remove(ip, icur, state);
5204 		xfs_iext_prev(ifp, icur);
5205 		ifp->if_nextents--;
5206 
5207 		*logflagsp |= XFS_ILOG_CORE;
5208 		if (!cur) {
5209 			*logflagsp |= xfs_ilog_fext(whichfork);
5210 			break;
5211 		}
5212 		if ((error = xfs_btree_delete(cur, &i)))
5213 			return error;
5214 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5215 			xfs_btree_mark_sick(cur);
5216 			return -EFSCORRUPTED;
5217 		}
5218 		break;
5219 	case BMAP_LEFT_FILLING:
5220 		/*
5221 		 * Deleting the first part of the extent.
5222 		 */
5223 		got.br_startoff = del_endoff;
5224 		got.br_startblock = del_endblock;
5225 		got.br_blockcount -= del->br_blockcount;
5226 		xfs_iext_update_extent(ip, state, icur, &got);
5227 		if (!cur) {
5228 			*logflagsp |= xfs_ilog_fext(whichfork);
5229 			break;
5230 		}
5231 		error = xfs_bmbt_update(cur, &got);
5232 		if (error)
5233 			return error;
5234 		break;
5235 	case BMAP_RIGHT_FILLING:
5236 		/*
5237 		 * Deleting the last part of the extent.
5238 		 */
5239 		got.br_blockcount -= del->br_blockcount;
5240 		xfs_iext_update_extent(ip, state, icur, &got);
5241 		if (!cur) {
5242 			*logflagsp |= xfs_ilog_fext(whichfork);
5243 			break;
5244 		}
5245 		error = xfs_bmbt_update(cur, &got);
5246 		if (error)
5247 			return error;
5248 		break;
5249 	case 0:
5250 		/*
5251 		 * Deleting the middle of the extent.
5252 		 */
5253 
5254 		old = got;
5255 
5256 		got.br_blockcount = del->br_startoff - got.br_startoff;
5257 		xfs_iext_update_extent(ip, state, icur, &got);
5258 
5259 		new.br_startoff = del_endoff;
5260 		new.br_blockcount = got_endoff - del_endoff;
5261 		new.br_state = got.br_state;
5262 		new.br_startblock = del_endblock;
5263 
5264 		*logflagsp |= XFS_ILOG_CORE;
5265 		if (cur) {
5266 			error = xfs_bmbt_update(cur, &got);
5267 			if (error)
5268 				return error;
5269 			error = xfs_btree_increment(cur, 0, &i);
5270 			if (error)
5271 				return error;
5272 			cur->bc_rec.b = new;
5273 			error = xfs_btree_insert(cur, &i);
5274 			if (error && error != -ENOSPC)
5275 				return error;
5276 			/*
5277 			 * If get no-space back from btree insert, it tried a
5278 			 * split, and we have a zero block reservation.  Fix up
5279 			 * our state and return the error.
5280 			 */
5281 			if (error == -ENOSPC) {
5282 				/*
5283 				 * Reset the cursor, don't trust it after any
5284 				 * insert operation.
5285 				 */
5286 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5287 				if (error)
5288 					return error;
5289 				if (XFS_IS_CORRUPT(mp, i != 1)) {
5290 					xfs_btree_mark_sick(cur);
5291 					return -EFSCORRUPTED;
5292 				}
5293 				/*
5294 				 * Update the btree record back
5295 				 * to the original value.
5296 				 */
5297 				error = xfs_bmbt_update(cur, &old);
5298 				if (error)
5299 					return error;
5300 				/*
5301 				 * Reset the extent record back
5302 				 * to the original value.
5303 				 */
5304 				xfs_iext_update_extent(ip, state, icur, &old);
5305 				*logflagsp = 0;
5306 				return -ENOSPC;
5307 			}
5308 			if (XFS_IS_CORRUPT(mp, i != 1)) {
5309 				xfs_btree_mark_sick(cur);
5310 				return -EFSCORRUPTED;
5311 			}
5312 		} else
5313 			*logflagsp |= xfs_ilog_fext(whichfork);
5314 
5315 		ifp->if_nextents++;
5316 		xfs_iext_next(ifp, icur);
5317 		xfs_iext_insert(ip, icur, &new, state);
5318 		break;
5319 	}
5320 
5321 	/* remove reverse mapping */
5322 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5323 
5324 	/*
5325 	 * If we need to, add to list of extents to delete.
5326 	 */
5327 	if (!(bflags & XFS_BMAPI_REMAP)) {
5328 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5329 			xfs_refcount_decrease_extent(tp, del);
5330 		} else if (xfs_ifork_is_realtime(ip, whichfork)) {
5331 			/*
5332 			 * Ensure the bitmap and summary inodes are locked
5333 			 * and joined to the transaction before modifying them.
5334 			 */
5335 			if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
5336 				tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
5337 				xfs_rtbitmap_lock(mp);
5338 				xfs_rtbitmap_trans_join(tp);
5339 			}
5340 			error = xfs_rtfree_blocks(tp, del->br_startblock,
5341 					del->br_blockcount);
5342 		} else {
5343 			unsigned int	efi_flags = 0;
5344 
5345 			if ((bflags & XFS_BMAPI_NODISCARD) ||
5346 			    del->br_state == XFS_EXT_UNWRITTEN)
5347 				efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;
5348 
5349 			error = xfs_free_extent_later(tp, del->br_startblock,
5350 					del->br_blockcount, NULL,
5351 					XFS_AG_RESV_NONE, efi_flags);
5352 		}
5353 		if (error)
5354 			return error;
5355 	}
5356 
5357 	/*
5358 	 * Adjust inode # blocks in the file.
5359 	 */
5360 	if (nblks)
5361 		ip->i_nblocks -= nblks;
5362 	/*
5363 	 * Adjust quota data.
5364 	 */
5365 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
5366 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5367 
5368 	return 0;
5369 }
5370 
5371 /*
5372  * Unmap (remove) blocks from a file.
5373  * If nexts is nonzero then the number of extents to remove is limited to
5374  * that value.  If not all extents in the block range can be removed then
5375  * *done is set.
5376  */
5377 static int
__xfs_bunmapi(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t start,xfs_filblks_t * rlen,uint32_t flags,xfs_extnum_t nexts)5378 __xfs_bunmapi(
5379 	struct xfs_trans	*tp,		/* transaction pointer */
5380 	struct xfs_inode	*ip,		/* incore inode */
5381 	xfs_fileoff_t		start,		/* first file offset deleted */
5382 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5383 	uint32_t		flags,		/* misc flags */
5384 	xfs_extnum_t		nexts)		/* number of extents max */
5385 {
5386 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5387 	struct xfs_bmbt_irec	del;		/* extent being deleted */
5388 	int			error;		/* error return value */
5389 	xfs_extnum_t		extno;		/* extent number in list */
5390 	struct xfs_bmbt_irec	got;		/* current extent record */
5391 	struct xfs_ifork	*ifp;		/* inode fork pointer */
5392 	int			isrt;		/* freeing in rt area */
5393 	int			logflags;	/* transaction logging flags */
5394 	xfs_extlen_t		mod;		/* rt extent offset */
5395 	struct xfs_mount	*mp = ip->i_mount;
5396 	int			tmp_logflags;	/* partial logging flags */
5397 	int			wasdel;		/* was a delayed alloc extent */
5398 	int			whichfork;	/* data or attribute fork */
5399 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
5400 	xfs_fileoff_t		end;
5401 	struct xfs_iext_cursor	icur;
5402 	bool			done = false;
5403 
5404 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5405 
5406 	whichfork = xfs_bmapi_whichfork(flags);
5407 	ASSERT(whichfork != XFS_COW_FORK);
5408 	ifp = xfs_ifork_ptr(ip, whichfork);
5409 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
5410 		xfs_bmap_mark_sick(ip, whichfork);
5411 		return -EFSCORRUPTED;
5412 	}
5413 	if (xfs_is_shutdown(mp))
5414 		return -EIO;
5415 
5416 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5417 	ASSERT(len > 0);
5418 	ASSERT(nexts >= 0);
5419 
5420 	error = xfs_iread_extents(tp, ip, whichfork);
5421 	if (error)
5422 		return error;
5423 
5424 	if (xfs_iext_count(ifp) == 0) {
5425 		*rlen = 0;
5426 		return 0;
5427 	}
5428 	XFS_STATS_INC(mp, xs_blk_unmap);
5429 	isrt = xfs_ifork_is_realtime(ip, whichfork);
5430 	end = start + len;
5431 
5432 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5433 		*rlen = 0;
5434 		return 0;
5435 	}
5436 	end--;
5437 
5438 	logflags = 0;
5439 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5440 		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
5441 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5442 	} else
5443 		cur = NULL;
5444 
5445 	extno = 0;
5446 	while (end != (xfs_fileoff_t)-1 && end >= start &&
5447 	       (nexts == 0 || extno < nexts)) {
5448 		/*
5449 		 * Is the found extent after a hole in which end lives?
5450 		 * Just back up to the previous extent, if so.
5451 		 */
5452 		if (got.br_startoff > end &&
5453 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5454 			done = true;
5455 			break;
5456 		}
5457 		/*
5458 		 * Is the last block of this extent before the range
5459 		 * we're supposed to delete?  If so, we're done.
5460 		 */
5461 		end = XFS_FILEOFF_MIN(end,
5462 			got.br_startoff + got.br_blockcount - 1);
5463 		if (end < start)
5464 			break;
5465 		/*
5466 		 * Then deal with the (possibly delayed) allocated space
5467 		 * we found.
5468 		 */
5469 		del = got;
5470 		wasdel = isnullstartblock(del.br_startblock);
5471 
5472 		if (got.br_startoff < start) {
5473 			del.br_startoff = start;
5474 			del.br_blockcount -= start - got.br_startoff;
5475 			if (!wasdel)
5476 				del.br_startblock += start - got.br_startoff;
5477 		}
5478 		if (del.br_startoff + del.br_blockcount > end + 1)
5479 			del.br_blockcount = end + 1 - del.br_startoff;
5480 
5481 		if (!isrt || (flags & XFS_BMAPI_REMAP))
5482 			goto delete;
5483 
5484 		mod = xfs_rtb_to_rtxoff(mp,
5485 				del.br_startblock + del.br_blockcount);
5486 		if (mod) {
5487 			/*
5488 			 * Realtime extent not lined up at the end.
5489 			 * The extent could have been split into written
5490 			 * and unwritten pieces, or we could just be
5491 			 * unmapping part of it.  But we can't really
5492 			 * get rid of part of a realtime extent.
5493 			 */
5494 			if (del.br_state == XFS_EXT_UNWRITTEN) {
5495 				/*
5496 				 * This piece is unwritten, or we're not
5497 				 * using unwritten extents.  Skip over it.
5498 				 */
5499 				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
5500 				end -= mod > del.br_blockcount ?
5501 					del.br_blockcount : mod;
5502 				if (end < got.br_startoff &&
5503 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5504 					done = true;
5505 					break;
5506 				}
5507 				continue;
5508 			}
5509 			/*
5510 			 * It's written, turn it unwritten.
5511 			 * This is better than zeroing it.
5512 			 */
5513 			ASSERT(del.br_state == XFS_EXT_NORM);
5514 			ASSERT(tp->t_blk_res > 0);
5515 			/*
5516 			 * If this spans a realtime extent boundary,
5517 			 * chop it back to the start of the one we end at.
5518 			 */
5519 			if (del.br_blockcount > mod) {
5520 				del.br_startoff += del.br_blockcount - mod;
5521 				del.br_startblock += del.br_blockcount - mod;
5522 				del.br_blockcount = mod;
5523 			}
5524 			del.br_state = XFS_EXT_UNWRITTEN;
5525 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5526 					whichfork, &icur, &cur, &del,
5527 					&logflags);
5528 			if (error)
5529 				goto error0;
5530 			goto nodelete;
5531 		}
5532 
5533 		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
5534 		if (mod) {
5535 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5536 
5537 			/*
5538 			 * Realtime extent is lined up at the end but not
5539 			 * at the front.  We'll get rid of full extents if
5540 			 * we can.
5541 			 */
5542 			if (del.br_blockcount > off) {
5543 				del.br_blockcount -= off;
5544 				del.br_startoff += off;
5545 				del.br_startblock += off;
5546 			} else if (del.br_startoff == start &&
5547 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5548 				    tp->t_blk_res == 0)) {
5549 				/*
5550 				 * Can't make it unwritten.  There isn't
5551 				 * a full extent here so just skip it.
5552 				 */
5553 				ASSERT(end >= del.br_blockcount);
5554 				end -= del.br_blockcount;
5555 				if (got.br_startoff > end &&
5556 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5557 					done = true;
5558 					break;
5559 				}
5560 				continue;
5561 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
5562 				struct xfs_bmbt_irec	prev;
5563 				xfs_fileoff_t		unwrite_start;
5564 
5565 				/*
5566 				 * This one is already unwritten.
5567 				 * It must have a written left neighbor.
5568 				 * Unwrite the killed part of that one and
5569 				 * try again.
5570 				 */
5571 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5572 					ASSERT(0);
5573 				ASSERT(prev.br_state == XFS_EXT_NORM);
5574 				ASSERT(!isnullstartblock(prev.br_startblock));
5575 				ASSERT(del.br_startblock ==
5576 				       prev.br_startblock + prev.br_blockcount);
5577 				unwrite_start = max3(start,
5578 						     del.br_startoff - mod,
5579 						     prev.br_startoff);
5580 				mod = unwrite_start - prev.br_startoff;
5581 				prev.br_startoff = unwrite_start;
5582 				prev.br_startblock += mod;
5583 				prev.br_blockcount -= mod;
5584 				prev.br_state = XFS_EXT_UNWRITTEN;
5585 				error = xfs_bmap_add_extent_unwritten_real(tp,
5586 						ip, whichfork, &icur, &cur,
5587 						&prev, &logflags);
5588 				if (error)
5589 					goto error0;
5590 				goto nodelete;
5591 			} else {
5592 				ASSERT(del.br_state == XFS_EXT_NORM);
5593 				del.br_state = XFS_EXT_UNWRITTEN;
5594 				error = xfs_bmap_add_extent_unwritten_real(tp,
5595 						ip, whichfork, &icur, &cur,
5596 						&del, &logflags);
5597 				if (error)
5598 					goto error0;
5599 				goto nodelete;
5600 			}
5601 		}
5602 
5603 delete:
5604 		if (wasdel) {
5605 			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
5606 		} else {
5607 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5608 					&del, &tmp_logflags, whichfork,
5609 					flags);
5610 			logflags |= tmp_logflags;
5611 			if (error)
5612 				goto error0;
5613 		}
5614 
5615 		end = del.br_startoff - 1;
5616 nodelete:
5617 		/*
5618 		 * If not done go on to the next (previous) record.
5619 		 */
5620 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5621 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5622 			    (got.br_startoff > end &&
5623 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5624 				done = true;
5625 				break;
5626 			}
5627 			extno++;
5628 		}
5629 	}
5630 	if (done || end == (xfs_fileoff_t)-1 || end < start)
5631 		*rlen = 0;
5632 	else
5633 		*rlen = end - start + 1;
5634 
5635 	/*
5636 	 * Convert to a btree if necessary.
5637 	 */
5638 	if (xfs_bmap_needs_btree(ip, whichfork)) {
5639 		ASSERT(cur == NULL);
5640 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5641 				&tmp_logflags, whichfork);
5642 		logflags |= tmp_logflags;
5643 	} else {
5644 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5645 			whichfork);
5646 	}
5647 
5648 error0:
5649 	/*
5650 	 * Log everything.  Do this after conversion, there's no point in
5651 	 * logging the extent records if we've converted to btree format.
5652 	 */
5653 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5654 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5655 		logflags &= ~xfs_ilog_fext(whichfork);
5656 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5657 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
5658 		logflags &= ~xfs_ilog_fbroot(whichfork);
5659 	/*
5660 	 * Log inode even in the error case, if the transaction
5661 	 * is dirty we'll need to shut down the filesystem.
5662 	 */
5663 	if (logflags)
5664 		xfs_trans_log_inode(tp, ip, logflags);
5665 	if (cur) {
5666 		if (!error)
5667 			cur->bc_bmap.allocated = 0;
5668 		xfs_btree_del_cursor(cur, error);
5669 	}
5670 	return error;
5671 }
5672 
5673 /* Unmap a range of a file. */
5674 int
xfs_bunmapi(xfs_trans_t * tp,struct xfs_inode * ip,xfs_fileoff_t bno,xfs_filblks_t len,uint32_t flags,xfs_extnum_t nexts,int * done)5675 xfs_bunmapi(
5676 	xfs_trans_t		*tp,
5677 	struct xfs_inode	*ip,
5678 	xfs_fileoff_t		bno,
5679 	xfs_filblks_t		len,
5680 	uint32_t		flags,
5681 	xfs_extnum_t		nexts,
5682 	int			*done)
5683 {
5684 	int			error;
5685 
5686 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5687 	*done = (len == 0);
5688 	return error;
5689 }
5690 
5691 /*
5692  * Determine whether an extent shift can be accomplished by a merge with the
5693  * extent that precedes the target hole of the shift.
5694  */
5695 STATIC bool
xfs_bmse_can_merge(struct xfs_bmbt_irec * left,struct xfs_bmbt_irec * got,xfs_fileoff_t shift)5696 xfs_bmse_can_merge(
5697 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5698 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5699 	xfs_fileoff_t		shift)	/* shift fsb */
5700 {
5701 	xfs_fileoff_t		startoff;
5702 
5703 	startoff = got->br_startoff - shift;
5704 
5705 	/*
5706 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5707 	 * the preceding extent.
5708 	 */
5709 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5710 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5711 	    (left->br_state != got->br_state) ||
5712 	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
5713 		return false;
5714 
5715 	return true;
5716 }
5717 
5718 /*
5719  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5720  * hole in the file. If an extent shift would result in the extent being fully
5721  * adjacent to the extent that currently precedes the hole, we can merge with
5722  * the preceding extent rather than do the shift.
5723  *
5724  * This function assumes the caller has verified a shift-by-merge is possible
5725  * with the provided extents via xfs_bmse_can_merge().
5726  */
5727 STATIC int
xfs_bmse_merge(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,xfs_fileoff_t shift,struct xfs_iext_cursor * icur,struct xfs_bmbt_irec * got,struct xfs_bmbt_irec * left,struct xfs_btree_cur * cur,int * logflags)5728 xfs_bmse_merge(
5729 	struct xfs_trans		*tp,
5730 	struct xfs_inode		*ip,
5731 	int				whichfork,
5732 	xfs_fileoff_t			shift,		/* shift fsb */
5733 	struct xfs_iext_cursor		*icur,
5734 	struct xfs_bmbt_irec		*got,		/* extent to shift */
5735 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5736 	struct xfs_btree_cur		*cur,
5737 	int				*logflags)	/* output */
5738 {
5739 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
5740 	struct xfs_bmbt_irec		new;
5741 	xfs_filblks_t			blockcount;
5742 	int				error, i;
5743 	struct xfs_mount		*mp = ip->i_mount;
5744 
5745 	blockcount = left->br_blockcount + got->br_blockcount;
5746 
5747 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5748 	ASSERT(xfs_bmse_can_merge(left, got, shift));
5749 
5750 	new = *left;
5751 	new.br_blockcount = blockcount;
5752 
5753 	/*
5754 	 * Update the on-disk extent count, the btree if necessary and log the
5755 	 * inode.
5756 	 */
5757 	ifp->if_nextents--;
5758 	*logflags |= XFS_ILOG_CORE;
5759 	if (!cur) {
5760 		*logflags |= XFS_ILOG_DEXT;
5761 		goto done;
5762 	}
5763 
5764 	/* lookup and remove the extent to merge */
5765 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5766 	if (error)
5767 		return error;
5768 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5769 		xfs_btree_mark_sick(cur);
5770 		return -EFSCORRUPTED;
5771 	}
5772 
5773 	error = xfs_btree_delete(cur, &i);
5774 	if (error)
5775 		return error;
5776 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5777 		xfs_btree_mark_sick(cur);
5778 		return -EFSCORRUPTED;
5779 	}
5780 
5781 	/* lookup and update size of the previous extent */
5782 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5783 	if (error)
5784 		return error;
5785 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5786 		xfs_btree_mark_sick(cur);
5787 		return -EFSCORRUPTED;
5788 	}
5789 
5790 	error = xfs_bmbt_update(cur, &new);
5791 	if (error)
5792 		return error;
5793 
5794 	/* change to extent format if required after extent removal */
5795 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5796 	if (error)
5797 		return error;
5798 
5799 done:
5800 	xfs_iext_remove(ip, icur, 0);
5801 	xfs_iext_prev(ifp, icur);
5802 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5803 			&new);
5804 
5805 	/* update reverse mapping. rmap functions merge the rmaps for us */
5806 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5807 	memcpy(&new, got, sizeof(new));
5808 	new.br_startoff = left->br_startoff + left->br_blockcount;
5809 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5810 	return 0;
5811 }
5812 
5813 static int
xfs_bmap_shift_update_extent(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,struct xfs_iext_cursor * icur,struct xfs_bmbt_irec * got,struct xfs_btree_cur * cur,int * logflags,xfs_fileoff_t startoff)5814 xfs_bmap_shift_update_extent(
5815 	struct xfs_trans	*tp,
5816 	struct xfs_inode	*ip,
5817 	int			whichfork,
5818 	struct xfs_iext_cursor	*icur,
5819 	struct xfs_bmbt_irec	*got,
5820 	struct xfs_btree_cur	*cur,
5821 	int			*logflags,
5822 	xfs_fileoff_t		startoff)
5823 {
5824 	struct xfs_mount	*mp = ip->i_mount;
5825 	struct xfs_bmbt_irec	prev = *got;
5826 	int			error, i;
5827 
5828 	*logflags |= XFS_ILOG_CORE;
5829 
5830 	got->br_startoff = startoff;
5831 
5832 	if (cur) {
5833 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5834 		if (error)
5835 			return error;
5836 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5837 			xfs_btree_mark_sick(cur);
5838 			return -EFSCORRUPTED;
5839 		}
5840 
5841 		error = xfs_bmbt_update(cur, got);
5842 		if (error)
5843 			return error;
5844 	} else {
5845 		*logflags |= XFS_ILOG_DEXT;
5846 	}
5847 
5848 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5849 			got);
5850 
5851 	/* update reverse mapping */
5852 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5853 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5854 	return 0;
5855 }
5856 
5857 int
xfs_bmap_collapse_extents(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t * next_fsb,xfs_fileoff_t offset_shift_fsb,bool * done)5858 xfs_bmap_collapse_extents(
5859 	struct xfs_trans	*tp,
5860 	struct xfs_inode	*ip,
5861 	xfs_fileoff_t		*next_fsb,
5862 	xfs_fileoff_t		offset_shift_fsb,
5863 	bool			*done)
5864 {
5865 	int			whichfork = XFS_DATA_FORK;
5866 	struct xfs_mount	*mp = ip->i_mount;
5867 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5868 	struct xfs_btree_cur	*cur = NULL;
5869 	struct xfs_bmbt_irec	got, prev;
5870 	struct xfs_iext_cursor	icur;
5871 	xfs_fileoff_t		new_startoff;
5872 	int			error = 0;
5873 	int			logflags = 0;
5874 
5875 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5876 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5877 		xfs_bmap_mark_sick(ip, whichfork);
5878 		return -EFSCORRUPTED;
5879 	}
5880 
5881 	if (xfs_is_shutdown(mp))
5882 		return -EIO;
5883 
5884 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5885 
5886 	error = xfs_iread_extents(tp, ip, whichfork);
5887 	if (error)
5888 		return error;
5889 
5890 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5891 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5892 
5893 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5894 		*done = true;
5895 		goto del_cursor;
5896 	}
5897 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5898 		xfs_bmap_mark_sick(ip, whichfork);
5899 		error = -EFSCORRUPTED;
5900 		goto del_cursor;
5901 	}
5902 
5903 	new_startoff = got.br_startoff - offset_shift_fsb;
5904 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5905 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5906 			error = -EINVAL;
5907 			goto del_cursor;
5908 		}
5909 
5910 		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5911 			error = xfs_bmse_merge(tp, ip, whichfork,
5912 					offset_shift_fsb, &icur, &got, &prev,
5913 					cur, &logflags);
5914 			if (error)
5915 				goto del_cursor;
5916 			goto done;
5917 		}
5918 	} else {
5919 		if (got.br_startoff < offset_shift_fsb) {
5920 			error = -EINVAL;
5921 			goto del_cursor;
5922 		}
5923 	}
5924 
5925 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5926 			cur, &logflags, new_startoff);
5927 	if (error)
5928 		goto del_cursor;
5929 
5930 done:
5931 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5932 		*done = true;
5933 		goto del_cursor;
5934 	}
5935 
5936 	*next_fsb = got.br_startoff;
5937 del_cursor:
5938 	if (cur)
5939 		xfs_btree_del_cursor(cur, error);
5940 	if (logflags)
5941 		xfs_trans_log_inode(tp, ip, logflags);
5942 	return error;
5943 }
5944 
5945 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5946 int
xfs_bmap_can_insert_extents(struct xfs_inode * ip,xfs_fileoff_t off,xfs_fileoff_t shift)5947 xfs_bmap_can_insert_extents(
5948 	struct xfs_inode	*ip,
5949 	xfs_fileoff_t		off,
5950 	xfs_fileoff_t		shift)
5951 {
5952 	struct xfs_bmbt_irec	got;
5953 	int			is_empty;
5954 	int			error = 0;
5955 
5956 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
5957 
5958 	if (xfs_is_shutdown(ip->i_mount))
5959 		return -EIO;
5960 
5961 	xfs_ilock(ip, XFS_ILOCK_EXCL);
5962 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5963 	if (!error && !is_empty && got.br_startoff >= off &&
5964 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5965 		error = -EINVAL;
5966 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
5967 
5968 	return error;
5969 }
5970 
5971 int
xfs_bmap_insert_extents(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t * next_fsb,xfs_fileoff_t offset_shift_fsb,bool * done,xfs_fileoff_t stop_fsb)5972 xfs_bmap_insert_extents(
5973 	struct xfs_trans	*tp,
5974 	struct xfs_inode	*ip,
5975 	xfs_fileoff_t		*next_fsb,
5976 	xfs_fileoff_t		offset_shift_fsb,
5977 	bool			*done,
5978 	xfs_fileoff_t		stop_fsb)
5979 {
5980 	int			whichfork = XFS_DATA_FORK;
5981 	struct xfs_mount	*mp = ip->i_mount;
5982 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5983 	struct xfs_btree_cur	*cur = NULL;
5984 	struct xfs_bmbt_irec	got, next;
5985 	struct xfs_iext_cursor	icur;
5986 	xfs_fileoff_t		new_startoff;
5987 	int			error = 0;
5988 	int			logflags = 0;
5989 
5990 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5991 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5992 		xfs_bmap_mark_sick(ip, whichfork);
5993 		return -EFSCORRUPTED;
5994 	}
5995 
5996 	if (xfs_is_shutdown(mp))
5997 		return -EIO;
5998 
5999 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
6000 
6001 	error = xfs_iread_extents(tp, ip, whichfork);
6002 	if (error)
6003 		return error;
6004 
6005 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
6006 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6007 
6008 	if (*next_fsb == NULLFSBLOCK) {
6009 		xfs_iext_last(ifp, &icur);
6010 		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
6011 		    stop_fsb > got.br_startoff) {
6012 			*done = true;
6013 			goto del_cursor;
6014 		}
6015 	} else {
6016 		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
6017 			*done = true;
6018 			goto del_cursor;
6019 		}
6020 	}
6021 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
6022 		xfs_bmap_mark_sick(ip, whichfork);
6023 		error = -EFSCORRUPTED;
6024 		goto del_cursor;
6025 	}
6026 
6027 	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
6028 		xfs_bmap_mark_sick(ip, whichfork);
6029 		error = -EFSCORRUPTED;
6030 		goto del_cursor;
6031 	}
6032 
6033 	new_startoff = got.br_startoff + offset_shift_fsb;
6034 	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
6035 		if (new_startoff + got.br_blockcount > next.br_startoff) {
6036 			error = -EINVAL;
6037 			goto del_cursor;
6038 		}
6039 
6040 		/*
6041 		 * Unlike a left shift (which involves a hole punch), a right
6042 		 * shift does not modify extent neighbors in any way.  We should
6043 		 * never find mergeable extents in this scenario.  Check anyways
6044 		 * and warn if we encounter two extents that could be one.
6045 		 */
6046 		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
6047 			WARN_ON_ONCE(1);
6048 	}
6049 
6050 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
6051 			cur, &logflags, new_startoff);
6052 	if (error)
6053 		goto del_cursor;
6054 
6055 	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
6056 	    stop_fsb >= got.br_startoff + got.br_blockcount) {
6057 		*done = true;
6058 		goto del_cursor;
6059 	}
6060 
6061 	*next_fsb = got.br_startoff;
6062 del_cursor:
6063 	if (cur)
6064 		xfs_btree_del_cursor(cur, error);
6065 	if (logflags)
6066 		xfs_trans_log_inode(tp, ip, logflags);
6067 	return error;
6068 }
6069 
6070 /*
6071  * Splits an extent into two extents at split_fsb block such that it is the
6072  * first block of the current_ext. @ext is a target extent to be split.
6073  * @split_fsb is a block where the extents is split.  If split_fsb lies in a
6074  * hole or the first block of extents, just return 0.
6075  */
6076 int
xfs_bmap_split_extent(struct xfs_trans * tp,struct xfs_inode * ip,xfs_fileoff_t split_fsb)6077 xfs_bmap_split_extent(
6078 	struct xfs_trans	*tp,
6079 	struct xfs_inode	*ip,
6080 	xfs_fileoff_t		split_fsb)
6081 {
6082 	int				whichfork = XFS_DATA_FORK;
6083 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
6084 	struct xfs_btree_cur		*cur = NULL;
6085 	struct xfs_bmbt_irec		got;
6086 	struct xfs_bmbt_irec		new; /* split extent */
6087 	struct xfs_mount		*mp = ip->i_mount;
6088 	xfs_fsblock_t			gotblkcnt; /* new block count for got */
6089 	struct xfs_iext_cursor		icur;
6090 	int				error = 0;
6091 	int				logflags = 0;
6092 	int				i = 0;
6093 
6094 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6095 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6096 		xfs_bmap_mark_sick(ip, whichfork);
6097 		return -EFSCORRUPTED;
6098 	}
6099 
6100 	if (xfs_is_shutdown(mp))
6101 		return -EIO;
6102 
6103 	/* Read in all the extents */
6104 	error = xfs_iread_extents(tp, ip, whichfork);
6105 	if (error)
6106 		return error;
6107 
6108 	/*
6109 	 * If there are not extents, or split_fsb lies in a hole we are done.
6110 	 */
6111 	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6112 	    got.br_startoff >= split_fsb)
6113 		return 0;
6114 
6115 	gotblkcnt = split_fsb - got.br_startoff;
6116 	new.br_startoff = split_fsb;
6117 	new.br_startblock = got.br_startblock + gotblkcnt;
6118 	new.br_blockcount = got.br_blockcount - gotblkcnt;
6119 	new.br_state = got.br_state;
6120 
6121 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
6122 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6123 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
6124 		if (error)
6125 			goto del_cursor;
6126 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6127 			xfs_btree_mark_sick(cur);
6128 			error = -EFSCORRUPTED;
6129 			goto del_cursor;
6130 		}
6131 	}
6132 
6133 	got.br_blockcount = gotblkcnt;
6134 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6135 			&got);
6136 
6137 	logflags = XFS_ILOG_CORE;
6138 	if (cur) {
6139 		error = xfs_bmbt_update(cur, &got);
6140 		if (error)
6141 			goto del_cursor;
6142 	} else
6143 		logflags |= XFS_ILOG_DEXT;
6144 
6145 	/* Add new extent */
6146 	xfs_iext_next(ifp, &icur);
6147 	xfs_iext_insert(ip, &icur, &new, 0);
6148 	ifp->if_nextents++;
6149 
6150 	if (cur) {
6151 		error = xfs_bmbt_lookup_eq(cur, &new, &i);
6152 		if (error)
6153 			goto del_cursor;
6154 		if (XFS_IS_CORRUPT(mp, i != 0)) {
6155 			xfs_btree_mark_sick(cur);
6156 			error = -EFSCORRUPTED;
6157 			goto del_cursor;
6158 		}
6159 		error = xfs_btree_insert(cur, &i);
6160 		if (error)
6161 			goto del_cursor;
6162 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6163 			xfs_btree_mark_sick(cur);
6164 			error = -EFSCORRUPTED;
6165 			goto del_cursor;
6166 		}
6167 	}
6168 
6169 	/*
6170 	 * Convert to a btree if necessary.
6171 	 */
6172 	if (xfs_bmap_needs_btree(ip, whichfork)) {
6173 		int tmp_logflags; /* partial log flag return val */
6174 
6175 		ASSERT(cur == NULL);
6176 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6177 				&tmp_logflags, whichfork);
6178 		logflags |= tmp_logflags;
6179 	}
6180 
6181 del_cursor:
6182 	if (cur) {
6183 		cur->bc_bmap.allocated = 0;
6184 		xfs_btree_del_cursor(cur, error);
6185 	}
6186 
6187 	if (logflags)
6188 		xfs_trans_log_inode(tp, ip, logflags);
6189 	return error;
6190 }
6191 
6192 /* Record a bmap intent. */
6193 static inline void
__xfs_bmap_add(struct xfs_trans * tp,enum xfs_bmap_intent_type type,struct xfs_inode * ip,int whichfork,struct xfs_bmbt_irec * bmap)6194 __xfs_bmap_add(
6195 	struct xfs_trans		*tp,
6196 	enum xfs_bmap_intent_type	type,
6197 	struct xfs_inode		*ip,
6198 	int				whichfork,
6199 	struct xfs_bmbt_irec		*bmap)
6200 {
6201 	struct xfs_bmap_intent		*bi;
6202 
6203 	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
6204 	    bmap->br_startblock == HOLESTARTBLOCK ||
6205 	    bmap->br_startblock == DELAYSTARTBLOCK)
6206 		return;
6207 
6208 	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
6209 	INIT_LIST_HEAD(&bi->bi_list);
6210 	bi->bi_type = type;
6211 	bi->bi_owner = ip;
6212 	bi->bi_whichfork = whichfork;
6213 	bi->bi_bmap = *bmap;
6214 
6215 	xfs_bmap_defer_add(tp, bi);
6216 }
6217 
6218 /* Map an extent into a file. */
6219 void
xfs_bmap_map_extent(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,struct xfs_bmbt_irec * PREV)6220 xfs_bmap_map_extent(
6221 	struct xfs_trans	*tp,
6222 	struct xfs_inode	*ip,
6223 	int			whichfork,
6224 	struct xfs_bmbt_irec	*PREV)
6225 {
6226 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
6227 }
6228 
6229 /* Unmap an extent out of a file. */
6230 void
xfs_bmap_unmap_extent(struct xfs_trans * tp,struct xfs_inode * ip,int whichfork,struct xfs_bmbt_irec * PREV)6231 xfs_bmap_unmap_extent(
6232 	struct xfs_trans	*tp,
6233 	struct xfs_inode	*ip,
6234 	int			whichfork,
6235 	struct xfs_bmbt_irec	*PREV)
6236 {
6237 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
6238 }
6239 
6240 /*
6241  * Process one of the deferred bmap operations.  We pass back the
6242  * btree cursor to maintain our lock on the bmapbt between calls.
6243  */
6244 int
xfs_bmap_finish_one(struct xfs_trans * tp,struct xfs_bmap_intent * bi)6245 xfs_bmap_finish_one(
6246 	struct xfs_trans		*tp,
6247 	struct xfs_bmap_intent		*bi)
6248 {
6249 	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
6250 	int				error = 0;
6251 	int				flags = 0;
6252 
6253 	if (bi->bi_whichfork == XFS_ATTR_FORK)
6254 		flags |= XFS_BMAPI_ATTRFORK;
6255 
6256 	ASSERT(tp->t_highest_agno == NULLAGNUMBER);
6257 
6258 	trace_xfs_bmap_deferred(bi);
6259 
6260 	if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
6261 		return -EIO;
6262 
6263 	switch (bi->bi_type) {
6264 	case XFS_BMAP_MAP:
6265 		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
6266 			flags |= XFS_BMAPI_PREALLOC;
6267 		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6268 				bmap->br_blockcount, bmap->br_startblock,
6269 				flags);
6270 		bmap->br_blockcount = 0;
6271 		break;
6272 	case XFS_BMAP_UNMAP:
6273 		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6274 				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
6275 				1);
6276 		break;
6277 	default:
6278 		ASSERT(0);
6279 		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
6280 		error = -EFSCORRUPTED;
6281 	}
6282 
6283 	return error;
6284 }
6285 
6286 /* Check that an extent does not have invalid flags or bad ranges. */
6287 xfs_failaddr_t
xfs_bmap_validate_extent_raw(struct xfs_mount * mp,bool rtfile,int whichfork,struct xfs_bmbt_irec * irec)6288 xfs_bmap_validate_extent_raw(
6289 	struct xfs_mount	*mp,
6290 	bool			rtfile,
6291 	int			whichfork,
6292 	struct xfs_bmbt_irec	*irec)
6293 {
6294 	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6295 		return __this_address;
6296 
6297 	if (rtfile && whichfork == XFS_DATA_FORK) {
6298 		if (!xfs_verify_rtbext(mp, irec->br_startblock,
6299 					   irec->br_blockcount))
6300 			return __this_address;
6301 	} else {
6302 		if (!xfs_verify_fsbext(mp, irec->br_startblock,
6303 					   irec->br_blockcount))
6304 			return __this_address;
6305 	}
6306 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6307 		return __this_address;
6308 	return NULL;
6309 }
6310 
6311 int __init
xfs_bmap_intent_init_cache(void)6312 xfs_bmap_intent_init_cache(void)
6313 {
6314 	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6315 			sizeof(struct xfs_bmap_intent),
6316 			0, 0, NULL);
6317 
6318 	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6319 }
6320 
6321 void
xfs_bmap_intent_destroy_cache(void)6322 xfs_bmap_intent_destroy_cache(void)
6323 {
6324 	kmem_cache_destroy(xfs_bmap_intent_cache);
6325 	xfs_bmap_intent_cache = NULL;
6326 }
6327 
6328 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6329 xfs_failaddr_t
xfs_bmap_validate_extent(struct xfs_inode * ip,int whichfork,struct xfs_bmbt_irec * irec)6330 xfs_bmap_validate_extent(
6331 	struct xfs_inode	*ip,
6332 	int			whichfork,
6333 	struct xfs_bmbt_irec	*irec)
6334 {
6335 	return xfs_bmap_validate_extent_raw(ip->i_mount,
6336 			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
6337 }
6338 
6339 /*
6340  * Used in xfs_itruncate_extents().  This is the maximum number of extents
6341  * freed from a file in a single transaction.
6342  */
6343 #define	XFS_ITRUNC_MAX_EXTENTS	2
6344 
6345 /*
6346  * Unmap every extent in part of an inode's fork.  We don't do any higher level
6347  * invalidation work at all.
6348  */
6349 int
xfs_bunmapi_range(struct xfs_trans ** tpp,struct xfs_inode * ip,uint32_t flags,xfs_fileoff_t startoff,xfs_fileoff_t endoff)6350 xfs_bunmapi_range(
6351 	struct xfs_trans	**tpp,
6352 	struct xfs_inode	*ip,
6353 	uint32_t		flags,
6354 	xfs_fileoff_t		startoff,
6355 	xfs_fileoff_t		endoff)
6356 {
6357 	xfs_filblks_t		unmap_len = endoff - startoff + 1;
6358 	int			error = 0;
6359 
6360 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
6361 
6362 	while (unmap_len > 0) {
6363 		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
6364 		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
6365 				XFS_ITRUNC_MAX_EXTENTS);
6366 		if (error)
6367 			goto out;
6368 
6369 		/* free the just unmapped extents */
6370 		error = xfs_defer_finish(tpp);
6371 		if (error)
6372 			goto out;
6373 		cond_resched();
6374 	}
6375 out:
6376 	return error;
6377 }
6378 
6379 struct xfs_bmap_query_range {
6380 	xfs_bmap_query_range_fn	fn;
6381 	void			*priv;
6382 };
6383 
6384 /* Format btree record and pass to our callback. */
6385 STATIC int
xfs_bmap_query_range_helper(struct xfs_btree_cur * cur,const union xfs_btree_rec * rec,void * priv)6386 xfs_bmap_query_range_helper(
6387 	struct xfs_btree_cur		*cur,
6388 	const union xfs_btree_rec	*rec,
6389 	void				*priv)
6390 {
6391 	struct xfs_bmap_query_range	*query = priv;
6392 	struct xfs_bmbt_irec		irec;
6393 	xfs_failaddr_t			fa;
6394 
6395 	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
6396 	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
6397 			&irec);
6398 	if (fa) {
6399 		xfs_btree_mark_sick(cur);
6400 		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
6401 				cur->bc_ino.whichfork, fa, &irec);
6402 	}
6403 
6404 	return query->fn(cur, &irec, query->priv);
6405 }
6406 
6407 /* Find all bmaps. */
6408 int
xfs_bmap_query_all(struct xfs_btree_cur * cur,xfs_bmap_query_range_fn fn,void * priv)6409 xfs_bmap_query_all(
6410 	struct xfs_btree_cur		*cur,
6411 	xfs_bmap_query_range_fn		fn,
6412 	void				*priv)
6413 {
6414 	struct xfs_bmap_query_range	query = {
6415 		.priv			= priv,
6416 		.fn			= fn,
6417 	};
6418 
6419 	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
6420 }
6421 
6422 /* Helper function to extract extent size hint from inode */
6423 xfs_extlen_t
xfs_get_extsz_hint(struct xfs_inode * ip)6424 xfs_get_extsz_hint(
6425 	struct xfs_inode	*ip)
6426 {
6427 	/*
6428 	 * No point in aligning allocations if we need to COW to actually
6429 	 * write to them.
6430 	 */
6431 	if (xfs_is_always_cow_inode(ip))
6432 		return 0;
6433 	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
6434 		return ip->i_extsize;
6435 	if (XFS_IS_REALTIME_INODE(ip) &&
6436 	    ip->i_mount->m_sb.sb_rextsize > 1)
6437 		return ip->i_mount->m_sb.sb_rextsize;
6438 	return 0;
6439 }
6440 
6441 /*
6442  * Helper function to extract CoW extent size hint from inode.
6443  * Between the extent size hint and the CoW extent size hint, we
6444  * return the greater of the two.  If the value is zero (automatic),
6445  * use the default size.
6446  */
6447 xfs_extlen_t
xfs_get_cowextsz_hint(struct xfs_inode * ip)6448 xfs_get_cowextsz_hint(
6449 	struct xfs_inode	*ip)
6450 {
6451 	xfs_extlen_t		a, b;
6452 
6453 	a = 0;
6454 	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
6455 		a = ip->i_cowextsize;
6456 	b = xfs_get_extsz_hint(ip);
6457 
6458 	a = max(a, b);
6459 	if (a == 0)
6460 		return XFS_DEFAULT_COWEXTSZ_HINT;
6461 	return a;
6462 }
6463