xref: /linux/fs/xfs/libxfs/xfs_bmap.c (revision c148bc7535650fbfa95a1f571b9ffa2ab478ea33)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_dir2.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_alloc.h"
21 #include "xfs_bmap.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_bmap_btree.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_errortag.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_buf_item.h"
30 #include "xfs_trace.h"
31 #include "xfs_attr_leaf.h"
32 #include "xfs_filestream.h"
33 #include "xfs_rmap.h"
34 #include "xfs_ag.h"
35 #include "xfs_ag_resv.h"
36 #include "xfs_refcount.h"
37 #include "xfs_iomap.h"
38 #include "xfs_health.h"
39 #include "xfs_bmap_item.h"
40 #include "xfs_symlink_remote.h"
41 #include "xfs_inode_util.h"
42 #include "xfs_rtgroup.h"
43 #include "xfs_zone_alloc.h"
44 
45 struct kmem_cache		*xfs_bmap_intent_cache;
46 
47 /*
48  * Miscellaneous helper functions
49  */
50 
51 /*
52  * Compute and fill in the value of the maximum depth of a bmap btree
53  * in this filesystem.  Done once, during mount.
54  */
55 void
56 xfs_bmap_compute_maxlevels(
57 	xfs_mount_t	*mp,		/* file system mount structure */
58 	int		whichfork)	/* data or attr fork */
59 {
60 	uint64_t	maxblocks;	/* max blocks at this level */
61 	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
62 	int		level;		/* btree level */
63 	int		maxrootrecs;	/* max records in root block */
64 	int		minleafrecs;	/* min records in leaf block */
65 	int		minnoderecs;	/* min records in node block */
66 	int		sz;		/* root block size */
67 
68 	/*
69 	 * The maximum number of extents in a fork, hence the maximum number of
70 	 * leaf entries, is controlled by the size of the on-disk extent count.
71 	 *
72 	 * Note that we can no longer assume that if we are in ATTR1 that the
73 	 * fork offset of all the inodes will be
74 	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
75 	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
76 	 * but probably at various positions. Therefore, for both ATTR1 and
77 	 * ATTR2 we have to assume the worst case scenario of a minimum size
78 	 * available.
79 	 */
80 	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
81 				whichfork);
82 	if (whichfork == XFS_DATA_FORK)
83 		sz = xfs_bmdr_space_calc(MINDBTPTRS);
84 	else
85 		sz = xfs_bmdr_space_calc(MINABTPTRS);
86 
87 	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
88 	minleafrecs = mp->m_bmap_dmnr[0];
89 	minnoderecs = mp->m_bmap_dmnr[1];
90 	maxblocks = howmany_64(maxleafents, minleafrecs);
91 	for (level = 1; maxblocks > 1; level++) {
92 		if (maxblocks <= maxrootrecs)
93 			maxblocks = 1;
94 		else
95 			maxblocks = howmany_64(maxblocks, minnoderecs);
96 	}
97 	mp->m_bm_maxlevels[whichfork] = level;
98 	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
99 }
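/*
 * Editor's note (not part of the original source): an illustrative trace of
 * the sizing loop above, using made-up record counts rather than the real
 * geometry for any particular block size:
 *
 *	maxleafents = 1,000,000, minleafrecs = minnoderecs = 125, maxrootrecs = 9
 *
 *	maxblocks = howmany(1,000,000, 125) = 8000 leaf blocks, level = 1
 *	pass 1: 8000 > 9, maxblocks = howmany(8000, 125) = 64,  level = 2
 *	pass 2:   64 > 9, maxblocks = howmany(64, 125)   = 1,   level = 3
 *
 * The loop exits with level == 3, which is stored in m_bm_maxlevels[] as the
 * worst-case height of the bmap btree for this fork.
 */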
100 
101 unsigned int
102 xfs_bmap_compute_attr_offset(
103 	struct xfs_mount	*mp)
104 {
105 	if (mp->m_sb.sb_inodesize == 256)
106 		return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
107 	return xfs_bmdr_space_calc(6 * MINABTPTRS);
108 }
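/*
 * Editor's note (not part of the original source): for the minimum 256 byte
 * inode size the attr fork offset is pushed as far out as possible, leaving
 * the attr fork only the space a minimal bmap btree root needs; larger inodes
 * get a fixed offset sized for 6 * MINABTPTRS worth of btree root space.
 */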
109 
110 STATIC int				/* error */
111 xfs_bmbt_lookup_eq(
112 	struct xfs_btree_cur	*cur,
113 	struct xfs_bmbt_irec	*irec,
114 	int			*stat)	/* success/failure */
115 {
116 	cur->bc_rec.b = *irec;
117 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
118 }
119 
120 STATIC int				/* error */
121 xfs_bmbt_lookup_first(
122 	struct xfs_btree_cur	*cur,
123 	int			*stat)	/* success/failure */
124 {
125 	cur->bc_rec.b.br_startoff = 0;
126 	cur->bc_rec.b.br_startblock = 0;
127 	cur->bc_rec.b.br_blockcount = 0;
128 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
129 }
130 
131 /*
132  * Check if the inode needs to be converted to btree format.
133  */
134 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
135 {
136 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
137 
138 	return whichfork != XFS_COW_FORK &&
139 		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
140 		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
141 }
142 
143 /*
144  * Check if the inode should be converted to extent format.
145  */
146 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
147 {
148 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
149 
150 	return whichfork != XFS_COW_FORK &&
151 		ifp->if_format == XFS_DINODE_FMT_BTREE &&
152 		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
153 }
154 
155 /*
156  * Update the record referred to by cur to the value given by irec
157  * This either works (return 0) or gets an EFSCORRUPTED error.
158  */
159 STATIC int
160 xfs_bmbt_update(
161 	struct xfs_btree_cur	*cur,
162 	struct xfs_bmbt_irec	*irec)
163 {
164 	union xfs_btree_rec	rec;
165 
166 	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
167 	return xfs_btree_update(cur, &rec);
168 }
169 
170 /*
171  * Compute the worst-case number of indirect blocks that will be used
172  * for ip's delayed extent of length "len".
173  */
174 xfs_filblks_t
175 xfs_bmap_worst_indlen(
176 	struct xfs_inode	*ip,		/* incore inode pointer */
177 	xfs_filblks_t		len)		/* delayed extent length */
178 {
179 	struct xfs_mount	*mp = ip->i_mount;
180 	int			maxrecs = mp->m_bmap_dmxr[0];
181 	int			level;
182 	xfs_filblks_t		rval;
183 
184 	for (level = 0, rval = 0;
185 	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
186 	     level++) {
187 		len += maxrecs - 1;
188 		do_div(len, maxrecs);
189 		rval += len;
190 		if (len == 1)
191 			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
192 				level - 1;
193 		if (level == 0)
194 			maxrecs = mp->m_bmap_dmxr[1];
195 	}
196 	return rval;
197 }
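/*
 * Editor's note (not part of the original source): an illustrative walk
 * through the worst-case calculation above, using made-up values:
 *
 *	len = 10,000 delalloc blocks, maxrecs = 250 at every level,
 *	XFS_BM_MAXLEVELS = 5
 *
 *	level 0: len = howmany(10,000, 250) = 40 leaf blocks,  rval = 40
 *	level 1: len = howmany(40, 250)     = 1 node block,    rval = 41
 *		 len == 1, so return 41 + 5 - 1 - 1 = 44
 *
 * Once a level collapses to a single block, each remaining level up to the
 * root is assumed to cost one more block in the worst case.
 */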
198 
199 /*
200  * Calculate the default attribute fork offset for newly created inodes.
201  */
202 uint
203 xfs_default_attroffset(
204 	struct xfs_inode	*ip)
205 {
206 	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
207 		return roundup(sizeof(xfs_dev_t), 8);
208 	return M_IGEO(ip->i_mount)->attr_fork_offset;
209 }
210 
211 /*
212  * Helper routine to reset inode i_forkoff field when switching attribute fork
213  * from local to extent format - we reset it where possible to make space
214  * available for inline data fork extents.
215  */
216 STATIC void
217 xfs_bmap_forkoff_reset(
218 	xfs_inode_t	*ip,
219 	int		whichfork)
220 {
221 	if (whichfork == XFS_ATTR_FORK &&
222 	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
223 	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
224 		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;
225 
226 		if (dfl_forkoff > ip->i_forkoff)
227 			ip->i_forkoff = dfl_forkoff;
228 	}
229 }
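/*
 * Editor's note (not part of the original source): xfs_default_attroffset()
 * returns a byte offset into the inode literal area, while i_forkoff is kept
 * in 8-byte units, which is why callers such as xfs_bmap_forkoff_reset()
 * above shift the result right by 3.
 */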
230 
231 static int
232 xfs_bmap_read_buf(
233 	struct xfs_mount	*mp,		/* file system mount point */
234 	struct xfs_trans	*tp,		/* transaction pointer */
235 	xfs_fsblock_t		fsbno,		/* file system block number */
236 	struct xfs_buf		**bpp)		/* buffer for fsbno */
237 {
238 	struct xfs_buf		*bp;		/* return value */
239 	int			error;
240 
241 	if (!xfs_verify_fsbno(mp, fsbno))
242 		return -EFSCORRUPTED;
243 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
244 			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
245 			&xfs_bmbt_buf_ops);
246 	if (!error) {
247 		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
248 		*bpp = bp;
249 	}
250 	return error;
251 }
252 
253 #ifdef DEBUG
254 STATIC struct xfs_buf *
255 xfs_bmap_get_bp(
256 	struct xfs_btree_cur	*cur,
257 	xfs_fsblock_t		bno)
258 {
259 	struct xfs_log_item	*lip;
260 	int			i;
261 
262 	if (!cur)
263 		return NULL;
264 
265 	for (i = 0; i < cur->bc_maxlevels; i++) {
266 		if (!cur->bc_levels[i].bp)
267 			break;
268 		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
269 			return cur->bc_levels[i].bp;
270 	}
271 
272 	/* Chase down all the log items to see if the bp is there */
273 	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
274 		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;
275 
276 		if (bip->bli_item.li_type == XFS_LI_BUF &&
277 		    xfs_buf_daddr(bip->bli_buf) == bno)
278 			return bip->bli_buf;
279 	}
280 
281 	return NULL;
282 }
283 
284 STATIC void
285 xfs_check_block(
286 	struct xfs_btree_block	*block,
287 	xfs_mount_t		*mp,
288 	int			root,
289 	short			sz)
290 {
291 	int			i, j, dmxr;
292 	__be64			*pp, *thispa;	/* pointer to block address */
293 	xfs_bmbt_key_t		*prevp, *keyp;
294 
295 	ASSERT(be16_to_cpu(block->bb_level) > 0);
296 
297 	prevp = NULL;
298 	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
299 		dmxr = mp->m_bmap_dmxr[0];
300 		keyp = xfs_bmbt_key_addr(mp, block, i);
301 
302 		if (prevp) {
303 			ASSERT(be64_to_cpu(prevp->br_startoff) <
304 			       be64_to_cpu(keyp->br_startoff));
305 		}
306 		prevp = keyp;
307 
308 		/*
309 		 * Compare the block numbers to see if there are dups.
310 		 */
311 		if (root)
312 			pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz);
313 		else
314 			pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr);
315 
316 		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
317 			if (root)
318 				thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz);
319 			else
320 				thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr);
321 			if (*thispa == *pp) {
322 				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
323 					__func__, j, i,
324 					(unsigned long long)be64_to_cpu(*thispa));
325 				xfs_err(mp, "%s: ptrs are equal in node\n",
326 					__func__);
327 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
328 			}
329 		}
330 	}
331 }
332 
333 /*
334  * Check that the extents for the inode ip are in the right order in all
335  * btree leaves. This becomes prohibitively expensive for large extent count
336  * files, so don't bother with inodes that have more than 10,000 extents in
337  * them. The btree record ordering checks will still be done, so for such large
338  * bmapbt constructs those checks will catch most corruptions.
339  */
340 STATIC void
341 xfs_bmap_check_leaf_extents(
342 	struct xfs_btree_cur	*cur,	/* btree cursor or null */
343 	xfs_inode_t		*ip,		/* incore inode pointer */
344 	int			whichfork)	/* data or attr fork */
345 {
346 	struct xfs_mount	*mp = ip->i_mount;
347 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
348 	struct xfs_btree_block	*block;	/* current btree block */
349 	xfs_fsblock_t		bno;	/* block # of "block" */
350 	struct xfs_buf		*bp;	/* buffer for "block" */
351 	int			error;	/* error return value */
352 	xfs_extnum_t		i=0, j;	/* index into the extents list */
353 	int			level;	/* btree level, for checking */
354 	__be64			*pp;	/* pointer to block address */
355 	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
356 	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
357 	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
358 	int			bp_release = 0;
359 
360 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
361 		return;
362 
363 	/* skip large extent count inodes */
364 	if (ip->i_df.if_nextents > 10000)
365 		return;
366 
367 	bno = NULLFSBLOCK;
368 	block = ifp->if_broot;
369 	/*
370 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
371 	 */
372 	level = be16_to_cpu(block->bb_level);
373 	ASSERT(level > 0);
374 	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
375 	pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes);
376 	bno = be64_to_cpu(*pp);
377 
378 	ASSERT(bno != NULLFSBLOCK);
379 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
380 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
381 
382 	/*
383 	 * Go down the tree until leaf level is reached, following the first
384 	 * pointer (leftmost) at each level.
385 	 */
386 	while (level-- > 0) {
387 		/* See if buf is in cur first */
388 		bp_release = 0;
389 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
390 		if (!bp) {
391 			bp_release = 1;
392 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
393 			if (xfs_metadata_is_sick(error))
394 				xfs_btree_mark_sick(cur);
395 			if (error)
396 				goto error_norelse;
397 		}
398 		block = XFS_BUF_TO_BLOCK(bp);
399 		if (level == 0)
400 			break;
401 
402 		/*
403 		 * Check this block for basic sanity (increasing keys and
404 		 * no duplicate blocks).
405 		 */
406 
407 		xfs_check_block(block, mp, 0, 0);
408 		pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]);
409 		bno = be64_to_cpu(*pp);
410 		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
411 			xfs_btree_mark_sick(cur);
412 			error = -EFSCORRUPTED;
413 			goto error0;
414 		}
415 		if (bp_release) {
416 			bp_release = 0;
417 			xfs_trans_brelse(NULL, bp);
418 		}
419 	}
420 
421 	/*
422 	 * Here with bp and block set to the leftmost leaf node in the tree.
423 	 */
424 	i = 0;
425 
426 	/*
427 	 * Loop over all leaf nodes checking that all extents are in the right order.
428 	 */
429 	for (;;) {
430 		xfs_fsblock_t	nextbno;
431 		xfs_extnum_t	num_recs;
432 
433 
434 		num_recs = xfs_btree_get_numrecs(block);
435 
436 		/*
437 		 * Read-ahead the next leaf block, if any.
438 		 */
439 
440 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
441 
442 		/*
443 		 * Check all the extents to make sure they are OK.
444 		 * If we had a previous block, the last entry should
445 		 * conform with the first entry in this one.
446 		 */
447 
448 		ep = xfs_bmbt_rec_addr(mp, block, 1);
449 		if (i) {
450 			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
451 			       xfs_bmbt_disk_get_blockcount(&last) <=
452 			       xfs_bmbt_disk_get_startoff(ep));
453 		}
454 		for (j = 1; j < num_recs; j++) {
455 			nextp = xfs_bmbt_rec_addr(mp, block, j + 1);
456 			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
457 			       xfs_bmbt_disk_get_blockcount(ep) <=
458 			       xfs_bmbt_disk_get_startoff(nextp));
459 			ep = nextp;
460 		}
461 
462 		last = *ep;
463 		i += num_recs;
464 		if (bp_release) {
465 			bp_release = 0;
466 			xfs_trans_brelse(NULL, bp);
467 		}
468 		bno = nextbno;
469 		/*
470 		 * If we've reached the end, stop.
471 		 */
472 		if (bno == NULLFSBLOCK)
473 			break;
474 
475 		bp_release = 0;
476 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
477 		if (!bp) {
478 			bp_release = 1;
479 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
480 			if (xfs_metadata_is_sick(error))
481 				xfs_btree_mark_sick(cur);
482 			if (error)
483 				goto error_norelse;
484 		}
485 		block = XFS_BUF_TO_BLOCK(bp);
486 	}
487 
488 	return;
489 
490 error0:
491 	xfs_warn(mp, "%s: at error0", __func__);
492 	if (bp_release)
493 		xfs_trans_brelse(NULL, bp);
494 error_norelse:
495 	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
496 		__func__, i);
497 	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
498 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
499 	return;
500 }
501 
502 /*
503  * Validate that the bmbt_irecs being returned from bmapi are valid
504  * given the caller's original parameters.  Specifically check the
505  * ranges of the returned irecs to ensure that they only extend beyond
506  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
507  */
508 STATIC void
509 xfs_bmap_validate_ret(
510 	xfs_fileoff_t		bno,
511 	xfs_filblks_t		len,
512 	uint32_t		flags,
513 	xfs_bmbt_irec_t		*mval,
514 	int			nmap,
515 	int			ret_nmap)
516 {
517 	int			i;		/* index to map values */
518 
519 	ASSERT(ret_nmap <= nmap);
520 
521 	for (i = 0; i < ret_nmap; i++) {
522 		ASSERT(mval[i].br_blockcount > 0);
523 		if (!(flags & XFS_BMAPI_ENTIRE)) {
524 			ASSERT(mval[i].br_startoff >= bno);
525 			ASSERT(mval[i].br_blockcount <= len);
526 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
527 			       bno + len);
528 		} else {
529 			ASSERT(mval[i].br_startoff < bno + len);
530 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
531 			       bno);
532 		}
533 		ASSERT(i == 0 ||
534 		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
535 		       mval[i].br_startoff);
536 		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
537 		       mval[i].br_startblock != HOLESTARTBLOCK);
538 		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
539 		       mval[i].br_state == XFS_EXT_UNWRITTEN);
540 	}
541 }
542 
543 #else
544 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
545 #define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
546 #endif /* DEBUG */
547 
548 /*
549  * Inode fork format manipulation functions
550  */
551 
552 /*
553  * Convert the inode format to extent format if it currently is in btree format,
554  * but the extent list is small enough that it fits into the extent format.
555  *
556  * Since the extents are already in-core, all we have to do is give up the space
557  * for the btree root and pitch the leaf block.
558  */
559 STATIC int				/* error */
560 xfs_bmap_btree_to_extents(
561 	struct xfs_trans	*tp,	/* transaction pointer */
562 	struct xfs_inode	*ip,	/* incore inode pointer */
563 	struct xfs_btree_cur	*cur,	/* btree cursor */
564 	int			*logflagsp, /* inode logging flags */
565 	int			whichfork)  /* data or attr fork */
566 {
567 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
568 	struct xfs_mount	*mp = ip->i_mount;
569 	struct xfs_btree_block	*rblock = ifp->if_broot;
570 	struct xfs_btree_block	*cblock;/* child btree block */
571 	xfs_fsblock_t		cbno;	/* child block number */
572 	struct xfs_buf		*cbp;	/* child block's buffer */
573 	int			error;	/* error return value */
574 	__be64			*pp;	/* ptr to block address */
575 	struct xfs_owner_info	oinfo;
576 
577 	/* check if we actually need the extent format first: */
578 	if (!xfs_bmap_wants_extents(ip, whichfork))
579 		return 0;
580 
581 	ASSERT(cur);
582 	ASSERT(whichfork != XFS_COW_FORK);
583 	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
584 	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
585 	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
586 	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1);
587 
588 	pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes);
589 	cbno = be64_to_cpu(*pp);
590 #ifdef DEBUG
591 	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
592 		xfs_btree_mark_sick(cur);
593 		return -EFSCORRUPTED;
594 	}
595 #endif
596 	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
597 	if (xfs_metadata_is_sick(error))
598 		xfs_btree_mark_sick(cur);
599 	if (error)
600 		return error;
601 	cblock = XFS_BUF_TO_BLOCK(cbp);
602 	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
603 		return error;
604 
605 	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
606 	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
607 			XFS_AG_RESV_NONE, 0);
608 	if (error)
609 		return error;
610 
611 	ip->i_nblocks--;
612 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
613 	xfs_trans_binval(tp, cbp);
614 	if (cur->bc_levels[0].bp == cbp)
615 		cur->bc_levels[0].bp = NULL;
616 	xfs_bmap_broot_realloc(ip, whichfork, 0);
617 	ASSERT(ifp->if_broot == NULL);
618 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
619 	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
620 	return 0;
621 }
622 
623 /*
624  * Convert an extents-format file into a btree-format file.
625  * The new file will have a root block (in the inode) and a single child block.
626  */
627 STATIC int					/* error */
628 xfs_bmap_extents_to_btree(
629 	struct xfs_trans	*tp,		/* transaction pointer */
630 	struct xfs_inode	*ip,		/* incore inode pointer */
631 	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
632 	int			wasdel,		/* converting a delayed alloc */
633 	int			*logflagsp,	/* inode logging flags */
634 	int			whichfork)	/* data or attr fork */
635 {
636 	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
637 	struct xfs_buf		*abp;		/* buffer for ablock */
638 	struct xfs_alloc_arg	args;		/* allocation arguments */
639 	struct xfs_bmbt_rec	*arp;		/* child record pointer */
640 	struct xfs_btree_block	*block;		/* btree root block */
641 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
642 	int			error;		/* error return value */
643 	struct xfs_ifork	*ifp;		/* inode fork pointer */
644 	struct xfs_bmbt_key	*kp;		/* root block key pointer */
645 	struct xfs_mount	*mp;		/* mount structure */
646 	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
647 	struct xfs_iext_cursor	icur;
648 	struct xfs_bmbt_irec	rec;
649 	xfs_extnum_t		cnt = 0;
650 
651 	mp = ip->i_mount;
652 	ASSERT(whichfork != XFS_COW_FORK);
653 	ifp = xfs_ifork_ptr(ip, whichfork);
654 	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
655 
656 	/*
657 	 * Make space in the inode incore. This needs to be undone if we fail
658 	 * to expand the root.
659 	 */
660 	block = xfs_bmap_broot_realloc(ip, whichfork, 1);
661 
662 	/*
663 	 * Fill in the root.
664 	 */
665 	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
666 	/*
667 	 * Need a cursor.  Can't allocate until bb_level is filled in.
668 	 */
669 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
670 	if (wasdel)
671 		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
672 	/*
673 	 * Convert to a btree with two levels, one record in root.
674 	 */
675 	ifp->if_format = XFS_DINODE_FMT_BTREE;
676 	memset(&args, 0, sizeof(args));
677 	args.tp = tp;
678 	args.mp = mp;
679 	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
680 
681 	args.minlen = args.maxlen = args.prod = 1;
682 	args.wasdel = wasdel;
683 	*logflagsp = 0;
684 	error = xfs_alloc_vextent_start_ag(&args,
685 				XFS_INO_TO_FSB(mp, ip->i_ino));
686 	if (error)
687 		goto out_root_realloc;
688 
689 	/*
690 	 * Allocation can't fail, the space was reserved.
691 	 */
692 	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
693 		error = -ENOSPC;
694 		goto out_root_realloc;
695 	}
696 
697 	cur->bc_bmap.allocated++;
698 	ip->i_nblocks++;
699 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
700 	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
701 			XFS_FSB_TO_DADDR(mp, args.fsbno),
702 			mp->m_bsize, 0, &abp);
703 	if (error)
704 		goto out_unreserve_dquot;
705 
706 	/*
707 	 * Fill in the child block.
708 	 */
709 	ablock = XFS_BUF_TO_BLOCK(abp);
710 	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);
711 
712 	for_each_xfs_iext(ifp, &icur, &rec) {
713 		if (isnullstartblock(rec.br_startblock))
714 			continue;
715 		arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt);
716 		xfs_bmbt_disk_set_all(arp, &rec);
717 		cnt++;
718 	}
719 	ASSERT(cnt == ifp->if_nextents);
720 	xfs_btree_set_numrecs(ablock, cnt);
721 
722 	/*
723 	 * Fill in the root key and pointer.
724 	 */
725 	kp = xfs_bmbt_key_addr(mp, block, 1);
726 	arp = xfs_bmbt_rec_addr(mp, ablock, 1);
727 	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
728 	pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
729 						be16_to_cpu(block->bb_level)));
730 	*pp = cpu_to_be64(args.fsbno);
731 
732 	/*
733 	 * Do all this logging at the end so that
734 	 * the root is at the right level.
735 	 */
736 	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
737 	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
738 	ASSERT(*curp == NULL);
739 	*curp = cur;
740 	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
741 	return 0;
742 
743 out_unreserve_dquot:
744 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
745 out_root_realloc:
746 	xfs_bmap_broot_realloc(ip, whichfork, 0);
747 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
748 	ASSERT(ifp->if_broot == NULL);
749 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
750 
751 	return error;
752 }
753 
754 /*
755  * Convert a local file to an extents file.
756  * This code is out of bounds for data forks of regular files,
757  * since the file data needs to get logged so things will stay consistent.
758  * (The bmap-level manipulations are ok, though).
759  */
760 void
761 xfs_bmap_local_to_extents_empty(
762 	struct xfs_trans	*tp,
763 	struct xfs_inode	*ip,
764 	int			whichfork)
765 {
766 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
767 
768 	ASSERT(whichfork != XFS_COW_FORK);
769 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
770 	ASSERT(ifp->if_bytes == 0);
771 	ASSERT(ifp->if_nextents == 0);
772 
773 	xfs_bmap_forkoff_reset(ip, whichfork);
774 	ifp->if_data = NULL;
775 	ifp->if_height = 0;
776 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
777 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
778 }
779 
780 
781 int					/* error */
782 xfs_bmap_local_to_extents(
783 	xfs_trans_t	*tp,		/* transaction pointer */
784 	xfs_inode_t	*ip,		/* incore inode pointer */
785 	xfs_extlen_t	total,		/* total blocks needed by transaction */
786 	int		*logflagsp,	/* inode logging flags */
787 	int		whichfork,
788 	void		(*init_fn)(struct xfs_trans *tp,
789 				   struct xfs_buf *bp,
790 				   struct xfs_inode *ip,
791 				   struct xfs_ifork *ifp, void *priv),
792 	void		*priv)
793 {
794 	int		error = 0;
795 	int		flags;		/* logging flags returned */
796 	struct xfs_ifork *ifp;		/* inode fork pointer */
797 	xfs_alloc_arg_t	args;		/* allocation arguments */
798 	struct xfs_buf	*bp;		/* buffer for extent block */
799 	struct xfs_bmbt_irec rec;
800 	struct xfs_iext_cursor icur;
801 
802 	/*
803 	 * We don't want to deal with the case of keeping inode data inline yet.
804 	 * So sending the data fork of a regular inode is invalid.
805 	 */
806 	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
807 	ifp = xfs_ifork_ptr(ip, whichfork);
808 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
809 
810 	if (!ifp->if_bytes) {
811 		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
812 		flags = XFS_ILOG_CORE;
813 		goto done;
814 	}
815 
816 	flags = 0;
817 	error = 0;
818 	memset(&args, 0, sizeof(args));
819 	args.tp = tp;
820 	args.mp = ip->i_mount;
821 	args.total = total;
822 	args.minlen = args.maxlen = args.prod = 1;
823 	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
824 
825 	/*
826 	 * Allocate a block.  We know we need only one, since the
827 	 * file currently fits in an inode.
828 	 */
829 	args.total = total;
830 	args.minlen = args.maxlen = args.prod = 1;
831 	error = xfs_alloc_vextent_start_ag(&args,
832 			XFS_INO_TO_FSB(args.mp, ip->i_ino));
833 	if (error)
834 		goto done;
835 
836 	/* Can't fail, the space was reserved. */
837 	ASSERT(args.fsbno != NULLFSBLOCK);
838 	ASSERT(args.len == 1);
839 	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
840 			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
841 			args.mp->m_bsize, 0, &bp);
842 	if (error)
843 		goto done;
844 
845 	/*
846 	 * Initialize the block, copy the data and log the remote buffer.
847 	 *
848 	 * The callout is responsible for logging because the remote format
849 	 * might differ from the local format and thus we don't know how much to
850 	 * log here. Note that init_fn must also set the buffer log item type
851 	 * correctly.
852 	 */
853 	init_fn(tp, bp, ip, ifp, priv);
854 
855 	/* account for the change in fork size */
856 	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
857 	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
858 	flags |= XFS_ILOG_CORE;
859 
860 	ifp->if_data = NULL;
861 	ifp->if_height = 0;
862 
863 	rec.br_startoff = 0;
864 	rec.br_startblock = args.fsbno;
865 	rec.br_blockcount = 1;
866 	rec.br_state = XFS_EXT_NORM;
867 	xfs_iext_first(ifp, &icur);
868 	xfs_iext_insert(ip, &icur, &rec, 0);
869 
870 	ifp->if_nextents = 1;
871 	ip->i_nblocks = 1;
872 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
873 	flags |= xfs_ilog_fext(whichfork);
874 
875 done:
876 	*logflagsp = flags;
877 	return error;
878 }
879 
880 /*
881  * Called from xfs_bmap_add_attrfork to handle btree format files.
882  */
883 STATIC int					/* error */
884 xfs_bmap_add_attrfork_btree(
885 	xfs_trans_t		*tp,		/* transaction pointer */
886 	xfs_inode_t		*ip,		/* incore inode pointer */
887 	int			*flags)		/* inode logging flags */
888 {
889 	struct xfs_btree_block	*block = ip->i_df.if_broot;
890 	struct xfs_btree_cur	*cur;		/* btree cursor */
891 	int			error;		/* error return value */
892 	xfs_mount_t		*mp;		/* file system mount struct */
893 	int			stat;		/* newroot status */
894 
895 	mp = ip->i_mount;
896 
897 	if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip))
898 		*flags |= XFS_ILOG_DBROOT;
899 	else {
900 		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
901 		error = xfs_bmbt_lookup_first(cur, &stat);
902 		if (error)
903 			goto error0;
904 		/* must be at least one entry */
905 		if (XFS_IS_CORRUPT(mp, stat != 1)) {
906 			xfs_btree_mark_sick(cur);
907 			error = -EFSCORRUPTED;
908 			goto error0;
909 		}
910 		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
911 			goto error0;
912 		if (stat == 0) {
913 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
914 			return -ENOSPC;
915 		}
916 		cur->bc_bmap.allocated = 0;
917 		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
918 	}
919 	return 0;
920 error0:
921 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
922 	return error;
923 }
924 
925 /*
926  * Called from xfs_bmap_add_attrfork to handle extents format files.
927  */
928 STATIC int					/* error */
929 xfs_bmap_add_attrfork_extents(
930 	struct xfs_trans	*tp,		/* transaction pointer */
931 	struct xfs_inode	*ip,		/* incore inode pointer */
932 	int			*flags)		/* inode logging flags */
933 {
934 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
935 	int			error;		/* error return value */
936 
937 	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
938 	    xfs_inode_data_fork_size(ip))
939 		return 0;
940 	cur = NULL;
941 	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
942 					  XFS_DATA_FORK);
943 	if (cur) {
944 		cur->bc_bmap.allocated = 0;
945 		xfs_btree_del_cursor(cur, error);
946 	}
947 	return error;
948 }
949 
950 /*
951  * Called from xfs_bmap_add_attrfork to handle local format files. Each
952  * different data fork content type needs a different callout to do the
953  * conversion. Some are basic and only require special block initialisation
954  * callouts for the data formatting; others (directories) are so specialised they
955  * handle everything themselves.
956  *
957  * XXX (dgc): investigate whether directory conversion can use the generic
958  * formatting callout. It should be possible - it's just a very complex
959  * formatter.
960  */
961 STATIC int					/* error */
962 xfs_bmap_add_attrfork_local(
963 	struct xfs_trans	*tp,		/* transaction pointer */
964 	struct xfs_inode	*ip,		/* incore inode pointer */
965 	int			*flags)		/* inode logging flags */
966 {
967 	struct xfs_da_args	dargs;		/* args for dir/attr code */
968 
969 	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
970 		return 0;
971 
972 	if (S_ISDIR(VFS_I(ip)->i_mode)) {
973 		memset(&dargs, 0, sizeof(dargs));
974 		dargs.geo = ip->i_mount->m_dir_geo;
975 		dargs.dp = ip;
976 		dargs.total = dargs.geo->fsbcount;
977 		dargs.whichfork = XFS_DATA_FORK;
978 		dargs.trans = tp;
979 		dargs.owner = ip->i_ino;
980 		return xfs_dir2_sf_to_block(&dargs);
981 	}
982 
983 	if (S_ISLNK(VFS_I(ip)->i_mode))
984 		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
985 				XFS_DATA_FORK, xfs_symlink_local_to_remote,
986 				NULL);
987 
988 	/* should only be called for types that support local format data */
989 	ASSERT(0);
990 	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
991 	return -EFSCORRUPTED;
992 }
993 
994 /*
995  * Set an inode attr fork offset based on the format of the data fork.
996  */
997 static int
998 xfs_bmap_set_attrforkoff(
999 	struct xfs_inode	*ip,
1000 	int			size,
1001 	int			*version)
1002 {
1003 	int			default_size = xfs_default_attroffset(ip) >> 3;
1004 
1005 	switch (ip->i_df.if_format) {
1006 	case XFS_DINODE_FMT_DEV:
1007 		ip->i_forkoff = default_size;
1008 		break;
1009 	case XFS_DINODE_FMT_LOCAL:
1010 	case XFS_DINODE_FMT_EXTENTS:
1011 	case XFS_DINODE_FMT_BTREE:
1012 		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1013 		if (!ip->i_forkoff)
1014 			ip->i_forkoff = default_size;
1015 		else if (xfs_has_attr2(ip->i_mount) && version)
1016 			*version = 2;
1017 		break;
1018 	default:
1019 		ASSERT(0);
1020 		return -EINVAL;
1021 	}
1022 
1023 	return 0;
1024 }
1025 
1026 /*
1027  * Convert inode from non-attributed to attributed.  Caller must hold the
1028  * ILOCK_EXCL and the file cannot have an attr fork.
1029  */
1030 int						/* error code */
1031 xfs_bmap_add_attrfork(
1032 	struct xfs_trans	*tp,
1033 	struct xfs_inode	*ip,		/* incore inode pointer */
1034 	int			size,		/* space new attribute needs */
1035 	int			rsvd)		/* xact may use reserved blks */
1036 {
1037 	struct xfs_mount	*mp = tp->t_mountp;
1038 	int			version = 1;	/* superblock attr version */
1039 	int			logflags;	/* logging flags */
1040 	int			error;		/* error return value */
1041 
1042 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1043 	if (!xfs_is_metadir_inode(ip))
1044 		ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1045 	ASSERT(!xfs_inode_has_attr_fork(ip));
1046 
1047 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1048 	error = xfs_bmap_set_attrforkoff(ip, size, &version);
1049 	if (error)
1050 		return error;
1051 
1052 	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
1053 	logflags = 0;
1054 	switch (ip->i_df.if_format) {
1055 	case XFS_DINODE_FMT_LOCAL:
1056 		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
1057 		break;
1058 	case XFS_DINODE_FMT_EXTENTS:
1059 		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
1060 		break;
1061 	case XFS_DINODE_FMT_BTREE:
1062 		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
1063 		break;
1064 	default:
1065 		error = 0;
1066 		break;
1067 	}
1068 	if (logflags)
1069 		xfs_trans_log_inode(tp, ip, logflags);
1070 	if (error)
1071 		return error;
1072 	if (!xfs_has_attr(mp) ||
1073 	   (!xfs_has_attr2(mp) && version == 2)) {
1074 		bool log_sb = false;
1075 
1076 		spin_lock(&mp->m_sb_lock);
1077 		if (!xfs_has_attr(mp)) {
1078 			xfs_add_attr(mp);
1079 			log_sb = true;
1080 		}
1081 		if (!xfs_has_attr2(mp) && version == 2) {
1082 			xfs_add_attr2(mp);
1083 			log_sb = true;
1084 		}
1085 		spin_unlock(&mp->m_sb_lock);
1086 		if (log_sb)
1087 			xfs_log_sb(tp);
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 /*
1094  * Internal and external extent tree search functions.
1095  */
1096 
1097 struct xfs_iread_state {
1098 	struct xfs_iext_cursor	icur;
1099 	xfs_extnum_t		loaded;
1100 };
1101 
1102 int
1103 xfs_bmap_complain_bad_rec(
1104 	struct xfs_inode		*ip,
1105 	int				whichfork,
1106 	xfs_failaddr_t			fa,
1107 	const struct xfs_bmbt_irec	*irec)
1108 {
1109 	struct xfs_mount		*mp = ip->i_mount;
1110 	const char			*forkname;
1111 
1112 	switch (whichfork) {
1113 	case XFS_DATA_FORK:	forkname = "data"; break;
1114 	case XFS_ATTR_FORK:	forkname = "attr"; break;
1115 	case XFS_COW_FORK:	forkname = "CoW"; break;
1116 	default:		forkname = "???"; break;
1117 	}
1118 
1119 	xfs_warn(mp,
1120  "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
1121 				ip->i_ino, forkname, fa);
1122 	xfs_warn(mp,
1123 		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
1124 		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
1125 		irec->br_state);
1126 
1127 	return -EFSCORRUPTED;
1128 }
1129 
1130 /* Stuff every bmbt record from this block into the incore extent map. */
1131 static int
1132 xfs_iread_bmbt_block(
1133 	struct xfs_btree_cur	*cur,
1134 	int			level,
1135 	void			*priv)
1136 {
1137 	struct xfs_iread_state	*ir = priv;
1138 	struct xfs_mount	*mp = cur->bc_mp;
1139 	struct xfs_inode	*ip = cur->bc_ino.ip;
1140 	struct xfs_btree_block	*block;
1141 	struct xfs_buf		*bp;
1142 	struct xfs_bmbt_rec	*frp;
1143 	xfs_extnum_t		num_recs;
1144 	xfs_extnum_t		j;
1145 	int			whichfork = cur->bc_ino.whichfork;
1146 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1147 
1148 	block = xfs_btree_get_block(cur, level, &bp);
1149 
1150 	/* Abort if we find more records than nextents. */
1151 	num_recs = xfs_btree_get_numrecs(block);
1152 	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
1153 		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
1154 				(unsigned long long)ip->i_ino);
1155 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
1156 				sizeof(*block), __this_address);
1157 		xfs_bmap_mark_sick(ip, whichfork);
1158 		return -EFSCORRUPTED;
1159 	}
1160 
1161 	/* Copy records into the incore cache. */
1162 	frp = xfs_bmbt_rec_addr(mp, block, 1);
1163 	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
1164 		struct xfs_bmbt_irec	new;
1165 		xfs_failaddr_t		fa;
1166 
1167 		xfs_bmbt_disk_get_all(frp, &new);
1168 		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1169 		if (fa) {
1170 			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1171 					"xfs_iread_extents(2)", frp,
1172 					sizeof(*frp), fa);
1173 			xfs_bmap_mark_sick(ip, whichfork);
1174 			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
1175 					&new);
1176 		}
1177 		xfs_iext_insert(ip, &ir->icur, &new,
1178 				xfs_bmap_fork_to_state(whichfork));
1179 		trace_xfs_read_extent(ip, &ir->icur,
1180 				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
1181 		xfs_iext_next(ifp, &ir->icur);
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 /*
1188  * Read in extents from a btree-format inode.
1189  */
1190 int
1191 xfs_iread_extents(
1192 	struct xfs_trans	*tp,
1193 	struct xfs_inode	*ip,
1194 	int			whichfork)
1195 {
1196 	struct xfs_iread_state	ir;
1197 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1198 	struct xfs_mount	*mp = ip->i_mount;
1199 	struct xfs_btree_cur	*cur;
1200 	int			error;
1201 
1202 	if (!xfs_need_iread_extents(ifp))
1203 		return 0;
1204 
1205 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1206 
1207 	ir.loaded = 0;
1208 	xfs_iext_first(ifp, &ir.icur);
1209 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
1210 	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
1211 			XFS_BTREE_VISIT_RECORDS, &ir);
1212 	xfs_btree_del_cursor(cur, error);
1213 	if (error)
1214 		goto out;
1215 
1216 	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
1217 		xfs_bmap_mark_sick(ip, whichfork);
1218 		error = -EFSCORRUPTED;
1219 		goto out;
1220 	}
1221 	ASSERT(ir.loaded == xfs_iext_count(ifp));
1222 	/*
1223 	 * Use release semantics so that we can use acquire semantics in
1224 	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
1225 	 * after that load.
1226 	 */
1227 	smp_store_release(&ifp->if_needextents, 0);
1228 	return 0;
1229 out:
1230 	if (xfs_metadata_is_sick(error))
1231 		xfs_bmap_mark_sick(ip, whichfork);
1232 	xfs_iext_destroy(ifp);
1233 	return error;
1234 }
1235 
1236 /*
1237  * Returns the relative block number of the first unused block(s) in the given
1238  * fork with at least "len" logically contiguous blocks free.  This is the
1239  * lowest-address hole if the fork has holes, else the first block past the end
1240  * of the fork.  Return 0 if the fork is currently local (in-inode).
1241  */
1242 int						/* error */
1243 xfs_bmap_first_unused(
1244 	struct xfs_trans	*tp,		/* transaction pointer */
1245 	struct xfs_inode	*ip,		/* incore inode */
1246 	xfs_extlen_t		len,		/* size of hole to find */
1247 	xfs_fileoff_t		*first_unused,	/* unused block */
1248 	int			whichfork)	/* data or attr fork */
1249 {
1250 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1251 	struct xfs_bmbt_irec	got;
1252 	struct xfs_iext_cursor	icur;
1253 	xfs_fileoff_t		lastaddr = 0;
1254 	xfs_fileoff_t		lowest, max;
1255 	int			error;
1256 
1257 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
1258 		*first_unused = 0;
1259 		return 0;
1260 	}
1261 
1262 	ASSERT(xfs_ifork_has_extents(ifp));
1263 
1264 	error = xfs_iread_extents(tp, ip, whichfork);
1265 	if (error)
1266 		return error;
1267 
1268 	lowest = max = *first_unused;
1269 	for_each_xfs_iext(ifp, &icur, &got) {
1270 		/*
1271 		 * See if the hole before this extent will work.
1272 		 */
1273 		if (got.br_startoff >= lowest + len &&
1274 		    got.br_startoff - max >= len)
1275 			break;
1276 		lastaddr = got.br_startoff + got.br_blockcount;
1277 		max = XFS_FILEOFF_MAX(lastaddr, lowest);
1278 	}
1279 
1280 	*first_unused = max;
1281 	return 0;
1282 }
1283 
1284 /*
1285  * Returns the file-relative block number of the last block - 1 before
1286  * last_block (input value) in the file.
1287  * This is not based on i_size, it is based on the extent records.
1288  * Returns 0 for local files, as they do not have extent records.
1289  */
1290 int						/* error */
1291 xfs_bmap_last_before(
1292 	struct xfs_trans	*tp,		/* transaction pointer */
1293 	struct xfs_inode	*ip,		/* incore inode */
1294 	xfs_fileoff_t		*last_block,	/* last block */
1295 	int			whichfork)	/* data or attr fork */
1296 {
1297 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1298 	struct xfs_bmbt_irec	got;
1299 	struct xfs_iext_cursor	icur;
1300 	int			error;
1301 
1302 	switch (ifp->if_format) {
1303 	case XFS_DINODE_FMT_LOCAL:
1304 		*last_block = 0;
1305 		return 0;
1306 	case XFS_DINODE_FMT_BTREE:
1307 	case XFS_DINODE_FMT_EXTENTS:
1308 		break;
1309 	default:
1310 		ASSERT(0);
1311 		xfs_bmap_mark_sick(ip, whichfork);
1312 		return -EFSCORRUPTED;
1313 	}
1314 
1315 	error = xfs_iread_extents(tp, ip, whichfork);
1316 	if (error)
1317 		return error;
1318 
1319 	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1320 		*last_block = 0;
1321 	return 0;
1322 }
1323 
1324 int
1325 xfs_bmap_last_extent(
1326 	struct xfs_trans	*tp,
1327 	struct xfs_inode	*ip,
1328 	int			whichfork,
1329 	struct xfs_bmbt_irec	*rec,
1330 	int			*is_empty)
1331 {
1332 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1333 	struct xfs_iext_cursor	icur;
1334 	int			error;
1335 
1336 	error = xfs_iread_extents(tp, ip, whichfork);
1337 	if (error)
1338 		return error;
1339 
1340 	xfs_iext_last(ifp, &icur);
1341 	if (!xfs_iext_get_extent(ifp, &icur, rec))
1342 		*is_empty = 1;
1343 	else
1344 		*is_empty = 0;
1345 	return 0;
1346 }
1347 
1348 /*
1349  * Check the last inode extent to determine whether this allocation will result
1350  * in blocks being allocated at the end of the file. When we allocate new data
1351  * blocks at the end of the file which do not start at the previous data block,
1352  * we will try to align the new blocks at stripe unit boundaries.
1353  *
1354  * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1355  * at, or past the EOF.
1356  */
1357 STATIC int
1358 xfs_bmap_isaeof(
1359 	struct xfs_bmalloca	*bma,
1360 	int			whichfork)
1361 {
1362 	struct xfs_bmbt_irec	rec;
1363 	int			is_empty;
1364 	int			error;
1365 
1366 	bma->aeof = false;
1367 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1368 				     &is_empty);
1369 	if (error)
1370 		return error;
1371 
1372 	if (is_empty) {
1373 		bma->aeof = true;
1374 		return 0;
1375 	}
1376 
1377 	/*
1378 	 * Check if we are allocating at or past the last extent, or at least into
1379 	 * the last delayed allocated extent.
1380 	 */
1381 	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1382 		(bma->offset >= rec.br_startoff &&
1383 		 isnullstartblock(rec.br_startblock));
1384 	return 0;
1385 }
1386 
1387 /*
1388  * Returns the file-relative block number of the first block past eof in
1389  * the file.  This is not based on i_size, it is based on the extent records.
1390  * Returns 0 for local files, as they do not have extent records.
1391  */
1392 int
1393 xfs_bmap_last_offset(
1394 	struct xfs_inode	*ip,
1395 	xfs_fileoff_t		*last_block,
1396 	int			whichfork)
1397 {
1398 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1399 	struct xfs_bmbt_irec	rec;
1400 	int			is_empty;
1401 	int			error;
1402 
1403 	*last_block = 0;
1404 
1405 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
1406 		return 0;
1407 
1408 	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
1409 		xfs_bmap_mark_sick(ip, whichfork);
1410 		return -EFSCORRUPTED;
1411 	}
1412 
1413 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1414 	if (error || is_empty)
1415 		return error;
1416 
1417 	*last_block = rec.br_startoff + rec.br_blockcount;
1418 	return 0;
1419 }
1420 
1421 /*
1422  * Extent tree manipulation functions used during allocation.
1423  */
1424 
1425 static inline bool
1426 xfs_bmap_same_rtgroup(
1427 	struct xfs_inode	*ip,
1428 	int			whichfork,
1429 	struct xfs_bmbt_irec	*left,
1430 	struct xfs_bmbt_irec	*right)
1431 {
1432 	struct xfs_mount	*mp = ip->i_mount;
1433 
1434 	if (xfs_ifork_is_realtime(ip, whichfork) && xfs_has_rtgroups(mp)) {
1435 		if (xfs_rtb_to_rgno(mp, left->br_startblock) !=
1436 		    xfs_rtb_to_rgno(mp, right->br_startblock))
1437 			return false;
1438 	}
1439 
1440 	return true;
1441 }
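/*
 * Editor's note (not part of the original source): with rtgroups enabled,
 * realtime extents may only be merged when both mappings sit in the same
 * realtime group; the contiguity checks in xfs_bmap_add_extent_delay_real()
 * below use this helper for exactly that purpose.
 */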
1442 
1443 /*
1444  * Convert a delayed allocation to a real allocation.
1445  */
1446 STATIC int				/* error */
1447 xfs_bmap_add_extent_delay_real(
1448 	struct xfs_bmalloca	*bma,
1449 	int			whichfork)
1450 {
1451 	struct xfs_mount	*mp = bma->ip->i_mount;
1452 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
1453 	struct xfs_bmbt_irec	*new = &bma->got;
1454 	int			error;	/* error return value */
1455 	int			i;	/* temp state */
1456 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
1457 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
1458 					/* left is 0, right is 1, prev is 2 */
1459 	int			rval=0;	/* return value (logging flags) */
1460 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
1461 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
1462 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
1463 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
1464 	int			tmp_rval;	/* partial logging flags */
1465 	struct xfs_bmbt_irec	old;
1466 
1467 	ASSERT(whichfork != XFS_ATTR_FORK);
1468 	ASSERT(!isnullstartblock(new->br_startblock));
1469 	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
1470 
1471 	XFS_STATS_INC(mp, xs_add_exlist);
1472 
1473 #define	LEFT		r[0]
1474 #define	RIGHT		r[1]
1475 #define	PREV		r[2]
1476 
1477 	/*
1478 	 * Set up a bunch of variables to make the tests simpler.
1479 	 */
1480 	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1481 	new_endoff = new->br_startoff + new->br_blockcount;
1482 	ASSERT(isnullstartblock(PREV.br_startblock));
1483 	ASSERT(PREV.br_startoff <= new->br_startoff);
1484 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1485 
1486 	da_old = startblockval(PREV.br_startblock);
1487 	da_new = 0;
1488 
1489 	/*
1490 	 * Set flags determining what part of the previous delayed allocation
1491 	 * extent is being replaced by a real allocation.
1492 	 */
1493 	if (PREV.br_startoff == new->br_startoff)
1494 		state |= BMAP_LEFT_FILLING;
1495 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1496 		state |= BMAP_RIGHT_FILLING;
1497 
1498 	/*
1499 	 * Check and set flags if this segment has a left neighbor.
1500 	 * Don't set contiguous if the combined extent would be too large.
1501 	 */
1502 	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1503 		state |= BMAP_LEFT_VALID;
1504 		if (isnullstartblock(LEFT.br_startblock))
1505 			state |= BMAP_LEFT_DELAY;
1506 	}
1507 
1508 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1509 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1510 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1511 	    LEFT.br_state == new->br_state &&
1512 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1513 	    xfs_bmap_same_rtgroup(bma->ip, whichfork, &LEFT, new))
1514 		state |= BMAP_LEFT_CONTIG;
1515 
1516 	/*
1517 	 * Check and set flags if this segment has a right neighbor.
1518 	 * Don't set contiguous if the combined extent would be too large.
1519 	 * Also check for all-three-contiguous being too large.
1520 	 */
1521 	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1522 		state |= BMAP_RIGHT_VALID;
1523 		if (isnullstartblock(RIGHT.br_startblock))
1524 			state |= BMAP_RIGHT_DELAY;
1525 	}
1526 
1527 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1528 	    new_endoff == RIGHT.br_startoff &&
1529 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1530 	    new->br_state == RIGHT.br_state &&
1531 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1532 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1533 		       BMAP_RIGHT_FILLING)) !=
1534 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1535 		       BMAP_RIGHT_FILLING) ||
1536 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1537 			<= XFS_MAX_BMBT_EXTLEN) &&
1538 	    xfs_bmap_same_rtgroup(bma->ip, whichfork, new, &RIGHT))
1539 		state |= BMAP_RIGHT_CONTIG;
1540 
1541 	error = 0;
1542 	/*
1543 	 * Switch out based on the FILLING and CONTIG state bits.
1544 	 */
1545 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1546 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1547 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1548 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1549 		/*
1550 		 * Filling in all of a previously delayed allocation extent.
1551 		 * The left and right neighbors are both contiguous with new.
1552 		 */
1553 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1554 
1555 		xfs_iext_remove(bma->ip, &bma->icur, state);
1556 		xfs_iext_remove(bma->ip, &bma->icur, state);
1557 		xfs_iext_prev(ifp, &bma->icur);
1558 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1559 		ifp->if_nextents--;
1560 
1561 		if (bma->cur == NULL)
1562 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1563 		else {
1564 			rval = XFS_ILOG_CORE;
1565 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1566 			if (error)
1567 				goto done;
1568 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1569 				xfs_btree_mark_sick(bma->cur);
1570 				error = -EFSCORRUPTED;
1571 				goto done;
1572 			}
1573 			error = xfs_btree_delete(bma->cur, &i);
1574 			if (error)
1575 				goto done;
1576 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1577 				xfs_btree_mark_sick(bma->cur);
1578 				error = -EFSCORRUPTED;
1579 				goto done;
1580 			}
1581 			error = xfs_btree_decrement(bma->cur, 0, &i);
1582 			if (error)
1583 				goto done;
1584 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1585 				xfs_btree_mark_sick(bma->cur);
1586 				error = -EFSCORRUPTED;
1587 				goto done;
1588 			}
1589 			error = xfs_bmbt_update(bma->cur, &LEFT);
1590 			if (error)
1591 				goto done;
1592 		}
1593 		ASSERT(da_new <= da_old);
1594 		break;
1595 
1596 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1597 		/*
1598 		 * Filling in all of a previously delayed allocation extent.
1599 		 * The left neighbor is contiguous, the right is not.
1600 		 */
1601 		old = LEFT;
1602 		LEFT.br_blockcount += PREV.br_blockcount;
1603 
1604 		xfs_iext_remove(bma->ip, &bma->icur, state);
1605 		xfs_iext_prev(ifp, &bma->icur);
1606 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1607 
1608 		if (bma->cur == NULL)
1609 			rval = XFS_ILOG_DEXT;
1610 		else {
1611 			rval = 0;
1612 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1613 			if (error)
1614 				goto done;
1615 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1616 				xfs_btree_mark_sick(bma->cur);
1617 				error = -EFSCORRUPTED;
1618 				goto done;
1619 			}
1620 			error = xfs_bmbt_update(bma->cur, &LEFT);
1621 			if (error)
1622 				goto done;
1623 		}
1624 		ASSERT(da_new <= da_old);
1625 		break;
1626 
1627 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1628 		/*
1629 		 * Filling in all of a previously delayed allocation extent.
1630 		 * The right neighbor is contiguous, the left is not. Take care
1631 		 * with delay -> unwritten extent allocation here because the
1632 		 * delalloc record we are overwriting is always written.
1633 		 */
1634 		PREV.br_startblock = new->br_startblock;
1635 		PREV.br_blockcount += RIGHT.br_blockcount;
1636 		PREV.br_state = new->br_state;
1637 
1638 		xfs_iext_next(ifp, &bma->icur);
1639 		xfs_iext_remove(bma->ip, &bma->icur, state);
1640 		xfs_iext_prev(ifp, &bma->icur);
1641 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1642 
1643 		if (bma->cur == NULL)
1644 			rval = XFS_ILOG_DEXT;
1645 		else {
1646 			rval = 0;
1647 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1648 			if (error)
1649 				goto done;
1650 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1651 				xfs_btree_mark_sick(bma->cur);
1652 				error = -EFSCORRUPTED;
1653 				goto done;
1654 			}
1655 			error = xfs_bmbt_update(bma->cur, &PREV);
1656 			if (error)
1657 				goto done;
1658 		}
1659 		ASSERT(da_new <= da_old);
1660 		break;
1661 
1662 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1663 		/*
1664 		 * Filling in all of a previously delayed allocation extent.
1665 		 * Neither the left nor right neighbors are contiguous with
1666 		 * the new one.
1667 		 */
1668 		PREV.br_startblock = new->br_startblock;
1669 		PREV.br_state = new->br_state;
1670 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1671 		ifp->if_nextents++;
1672 
1673 		if (bma->cur == NULL)
1674 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1675 		else {
1676 			rval = XFS_ILOG_CORE;
1677 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1678 			if (error)
1679 				goto done;
1680 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1681 				xfs_btree_mark_sick(bma->cur);
1682 				error = -EFSCORRUPTED;
1683 				goto done;
1684 			}
1685 			error = xfs_btree_insert(bma->cur, &i);
1686 			if (error)
1687 				goto done;
1688 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1689 				xfs_btree_mark_sick(bma->cur);
1690 				error = -EFSCORRUPTED;
1691 				goto done;
1692 			}
1693 		}
1694 		ASSERT(da_new <= da_old);
1695 		break;
1696 
1697 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1698 		/*
1699 		 * Filling in the first part of a previous delayed allocation.
1700 		 * The left neighbor is contiguous.
1701 		 */
1702 		old = LEFT;
1703 		temp = PREV.br_blockcount - new->br_blockcount;
1704 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1705 				startblockval(PREV.br_startblock));
1706 
1707 		LEFT.br_blockcount += new->br_blockcount;
1708 
1709 		PREV.br_blockcount = temp;
1710 		PREV.br_startoff += new->br_blockcount;
1711 		PREV.br_startblock = nullstartblock(da_new);
1712 
1713 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1714 		xfs_iext_prev(ifp, &bma->icur);
1715 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1716 
1717 		if (bma->cur == NULL)
1718 			rval = XFS_ILOG_DEXT;
1719 		else {
1720 			rval = 0;
1721 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1722 			if (error)
1723 				goto done;
1724 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1725 				xfs_btree_mark_sick(bma->cur);
1726 				error = -EFSCORRUPTED;
1727 				goto done;
1728 			}
1729 			error = xfs_bmbt_update(bma->cur, &LEFT);
1730 			if (error)
1731 				goto done;
1732 		}
1733 		ASSERT(da_new <= da_old);
1734 		break;
1735 
1736 	case BMAP_LEFT_FILLING:
1737 		/*
1738 		 * Filling in the first part of a previous delayed allocation.
1739 		 * The left neighbor is not contiguous.
1740 		 */
1741 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1742 		ifp->if_nextents++;
1743 
1744 		if (bma->cur == NULL)
1745 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1746 		else {
1747 			rval = XFS_ILOG_CORE;
1748 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1749 			if (error)
1750 				goto done;
1751 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1752 				xfs_btree_mark_sick(bma->cur);
1753 				error = -EFSCORRUPTED;
1754 				goto done;
1755 			}
1756 			error = xfs_btree_insert(bma->cur, &i);
1757 			if (error)
1758 				goto done;
1759 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1760 				xfs_btree_mark_sick(bma->cur);
1761 				error = -EFSCORRUPTED;
1762 				goto done;
1763 			}
1764 		}
1765 
1766 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1767 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1768 					&bma->cur, 1, &tmp_rval, whichfork);
1769 			rval |= tmp_rval;
1770 			if (error)
1771 				goto done;
1772 		}
1773 
1774 		temp = PREV.br_blockcount - new->br_blockcount;
1775 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1776 			startblockval(PREV.br_startblock) -
1777 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1778 
1779 		PREV.br_startoff = new_endoff;
1780 		PREV.br_blockcount = temp;
1781 		PREV.br_startblock = nullstartblock(da_new);
1782 		xfs_iext_next(ifp, &bma->icur);
1783 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1784 		xfs_iext_prev(ifp, &bma->icur);
1785 		break;
1786 
1787 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1788 		/*
1789 		 * Filling in the last part of a previous delayed allocation.
1790 		 * The right neighbor is contiguous with the new allocation.
1791 		 */
1792 		old = RIGHT;
1793 		RIGHT.br_startoff = new->br_startoff;
1794 		RIGHT.br_startblock = new->br_startblock;
1795 		RIGHT.br_blockcount += new->br_blockcount;
1796 
1797 		if (bma->cur == NULL)
1798 			rval = XFS_ILOG_DEXT;
1799 		else {
1800 			rval = 0;
1801 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1802 			if (error)
1803 				goto done;
1804 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1805 				xfs_btree_mark_sick(bma->cur);
1806 				error = -EFSCORRUPTED;
1807 				goto done;
1808 			}
1809 			error = xfs_bmbt_update(bma->cur, &RIGHT);
1810 			if (error)
1811 				goto done;
1812 		}
1813 
1814 		temp = PREV.br_blockcount - new->br_blockcount;
1815 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1816 			startblockval(PREV.br_startblock));
1817 
1818 		PREV.br_blockcount = temp;
1819 		PREV.br_startblock = nullstartblock(da_new);
1820 
1821 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1822 		xfs_iext_next(ifp, &bma->icur);
1823 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1824 		ASSERT(da_new <= da_old);
1825 		break;
1826 
1827 	case BMAP_RIGHT_FILLING:
1828 		/*
1829 		 * Filling in the last part of a previous delayed allocation.
1830 		 * The right neighbor is not contiguous.
1831 		 */
1832 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1833 		ifp->if_nextents++;
1834 
1835 		if (bma->cur == NULL)
1836 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1837 		else {
1838 			rval = XFS_ILOG_CORE;
1839 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1840 			if (error)
1841 				goto done;
1842 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1843 				xfs_btree_mark_sick(bma->cur);
1844 				error = -EFSCORRUPTED;
1845 				goto done;
1846 			}
1847 			error = xfs_btree_insert(bma->cur, &i);
1848 			if (error)
1849 				goto done;
1850 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1851 				xfs_btree_mark_sick(bma->cur);
1852 				error = -EFSCORRUPTED;
1853 				goto done;
1854 			}
1855 		}
1856 
1857 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1858 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1859 				&bma->cur, 1, &tmp_rval, whichfork);
1860 			rval |= tmp_rval;
1861 			if (error)
1862 				goto done;
1863 		}
1864 
1865 		temp = PREV.br_blockcount - new->br_blockcount;
1866 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1867 			startblockval(PREV.br_startblock) -
1868 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1869 
1870 		PREV.br_startblock = nullstartblock(da_new);
1871 		PREV.br_blockcount = temp;
1872 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1873 		xfs_iext_next(ifp, &bma->icur);
1874 		ASSERT(da_new <= da_old);
1875 		break;
1876 
1877 	case 0:
1878 		/*
1879 		 * Filling in the middle part of a previous delayed allocation.
1880 		 * Contiguity is impossible here.
1881 		 * This case is avoided almost all the time.
1882 		 *
1883 		 * We start with a delayed allocation:
1884 		 *
1885 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1886 		 *  PREV @ idx
1887 		 *
1888 		 * and we are allocating:
1889 		 *                     +rrrrrrrrrrrrrrrrr+
1890 		 *			      new
1891 		 *
1892 		 * and we set it up for insertion as:
1893 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1894 		 *                            new
1895 		 *  PREV @ idx          LEFT              RIGHT
1896 		 *                      inserted at idx + 1
1897 		 */
1898 		old = PREV;
1899 
1900 		/* LEFT is the new middle */
1901 		LEFT = *new;
1902 
1903 		/* RIGHT is the new right */
1904 		RIGHT.br_state = PREV.br_state;
1905 		RIGHT.br_startoff = new_endoff;
1906 		RIGHT.br_blockcount =
1907 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
1908 		RIGHT.br_startblock =
1909 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1910 					RIGHT.br_blockcount));
1911 
1912 		/* truncate PREV */
1913 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1914 		PREV.br_startblock =
1915 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1916 					PREV.br_blockcount));
1917 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1918 
1919 		xfs_iext_next(ifp, &bma->icur);
1920 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1921 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1922 		ifp->if_nextents++;
1923 
1924 		if (bma->cur == NULL)
1925 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1926 		else {
1927 			rval = XFS_ILOG_CORE;
1928 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1929 			if (error)
1930 				goto done;
1931 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1932 				xfs_btree_mark_sick(bma->cur);
1933 				error = -EFSCORRUPTED;
1934 				goto done;
1935 			}
1936 			error = xfs_btree_insert(bma->cur, &i);
1937 			if (error)
1938 				goto done;
1939 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1940 				xfs_btree_mark_sick(bma->cur);
1941 				error = -EFSCORRUPTED;
1942 				goto done;
1943 			}
1944 		}
1945 
1946 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1947 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1948 					&bma->cur, 1, &tmp_rval, whichfork);
1949 			rval |= tmp_rval;
1950 			if (error)
1951 				goto done;
1952 		}
1953 
1954 		da_new = startblockval(PREV.br_startblock) +
1955 			 startblockval(RIGHT.br_startblock);
1956 		break;
1957 
1958 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1959 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1960 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1961 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1962 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1963 	case BMAP_LEFT_CONTIG:
1964 	case BMAP_RIGHT_CONTIG:
1965 		/*
1966 		 * These cases are all impossible.
1967 		 */
1968 		ASSERT(0);
1969 	}
1970 
1971 	/* add reverse mapping unless caller opted out */
1972 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1973 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1974 
1975 	/* convert to a btree if necessary */
1976 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1977 		int	tmp_logflags;	/* partial log flag return val */
1978 
1979 		ASSERT(bma->cur == NULL);
1980 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1981 				&bma->cur, da_old > 0, &tmp_logflags,
1982 				whichfork);
1983 		bma->logflags |= tmp_logflags;
1984 		if (error)
1985 			goto done;
1986 	}
1987 
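	/*
	 * da_old and da_new are the worst-case indirect block reservations
	 * held before and after this change; any difference has to be
	 * reflected in the delalloc accounting and free block counters below.
	 */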
1988 	if (da_new != da_old)
1989 		xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);
1990 
1991 	if (bma->cur) {
1992 		da_new += bma->cur->bc_bmap.allocated;
1993 		bma->cur->bc_bmap.allocated = 0;
1994 	}
1995 
1996 	/* adjust for changes in reserved delayed indirect blocks */
1997 	if (da_new < da_old)
1998 		xfs_add_fdblocks(mp, da_old - da_new);
1999 	else if (da_new > da_old)
2000 		error = xfs_dec_fdblocks(mp, da_new - da_old, true);
2001 
2002 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2003 done:
2004 	if (whichfork != XFS_COW_FORK)
2005 		bma->logflags |= rval;
2006 	return error;
2007 #undef	LEFT
2008 #undef	RIGHT
2009 #undef	PREV
2010 }
2011 
2012 /*
2013  * Convert an unwritten allocation to a real allocation or vice versa.
2014  */
2015 int					/* error */
2016 xfs_bmap_add_extent_unwritten_real(
2017 	struct xfs_trans	*tp,
2018 	xfs_inode_t		*ip,	/* incore inode pointer */
2019 	int			whichfork,
2020 	struct xfs_iext_cursor	*icur,
2021 	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
2022 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
2023 	int			*logflagsp) /* inode logging flags */
2024 {
2025 	struct xfs_btree_cur	*cur;	/* btree cursor */
2026 	int			error;	/* error return value */
2027 	int			i;	/* temp state */
2028 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2029 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
2030 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
2031 					/* left is 0, right is 1, prev is 2 */
2032 	int			rval=0;	/* return value (logging flags) */
2033 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2034 	struct xfs_mount	*mp = ip->i_mount;
2035 	struct xfs_bmbt_irec	old;
2036 
2037 	*logflagsp = 0;
2038 
2039 	cur = *curp;
2040 	ifp = xfs_ifork_ptr(ip, whichfork);
2041 
2042 	ASSERT(!isnullstartblock(new->br_startblock));
2043 
2044 	XFS_STATS_INC(mp, xs_add_exlist);
2045 
2046 #define	LEFT		r[0]
2047 #define	RIGHT		r[1]
2048 #define	PREV		r[2]
2049 
2050 	/*
2051 	 * Set up a bunch of variables to make the tests simpler.
2052 	 */
2053 	error = 0;
2054 	xfs_iext_get_extent(ifp, icur, &PREV);
2055 	ASSERT(new->br_state != PREV.br_state);
2056 	new_endoff = new->br_startoff + new->br_blockcount;
2057 	ASSERT(PREV.br_startoff <= new->br_startoff);
2058 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2059 
2060 	/*
2061 	 * Set flags determining what part of the previous oldext allocation
2062 	 * extent is being replaced by a newext allocation.
2063 	 */
2064 	if (PREV.br_startoff == new->br_startoff)
2065 		state |= BMAP_LEFT_FILLING;
2066 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2067 		state |= BMAP_RIGHT_FILLING;
2068 
2069 	/*
2070 	 * Check and set flags if this segment has a left neighbor.
2071 	 * Don't set contiguous if the combined extent would be too large.
2072 	 */
2073 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2074 		state |= BMAP_LEFT_VALID;
2075 		if (isnullstartblock(LEFT.br_startblock))
2076 			state |= BMAP_LEFT_DELAY;
2077 	}
2078 
2079 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2080 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2081 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2082 	    LEFT.br_state == new->br_state &&
2083 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2084 	    xfs_bmap_same_rtgroup(ip, whichfork, &LEFT, new))
2085 		state |= BMAP_LEFT_CONTIG;
2086 
2087 	/*
2088 	 * Check and set flags if this segment has a right neighbor.
2089 	 * Don't set contiguous if the combined extent would be too large.
2090 	 * Also check for all-three-contiguous being too large.
2091 	 */
2092 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2093 		state |= BMAP_RIGHT_VALID;
2094 		if (isnullstartblock(RIGHT.br_startblock))
2095 			state |= BMAP_RIGHT_DELAY;
2096 	}
2097 
2098 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2099 	    new_endoff == RIGHT.br_startoff &&
2100 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2101 	    new->br_state == RIGHT.br_state &&
2102 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2103 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2104 		       BMAP_RIGHT_FILLING)) !=
2105 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2106 		       BMAP_RIGHT_FILLING) ||
2107 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2108 			<= XFS_MAX_BMBT_EXTLEN) &&
2109 	    xfs_bmap_same_rtgroup(ip, whichfork, new, &RIGHT))
2110 		state |= BMAP_RIGHT_CONTIG;
2111 
2112 	/*
2113 	 * Switch out based on the FILLING and CONTIG state bits.
2114 	 */
2115 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2116 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2117 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2118 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2119 		/*
2120 		 * Setting all of a previous oldext extent to newext.
2121 		 * The left and right neighbors are both contiguous with new.
2122 		 */
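		/*
		 * Three incore records collapse into one here: RIGHT and PREV
		 * are removed and their lengths folded into LEFT, both in the
		 * in-core extent list and, if present, in the bmap btree.
		 */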
2123 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2124 
2125 		xfs_iext_remove(ip, icur, state);
2126 		xfs_iext_remove(ip, icur, state);
2127 		xfs_iext_prev(ifp, icur);
2128 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2129 		ifp->if_nextents -= 2;
2130 		if (cur == NULL)
2131 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2132 		else {
2133 			rval = XFS_ILOG_CORE;
2134 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2135 			if (error)
2136 				goto done;
2137 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2138 				xfs_btree_mark_sick(cur);
2139 				error = -EFSCORRUPTED;
2140 				goto done;
2141 			}
2142 			if ((error = xfs_btree_delete(cur, &i)))
2143 				goto done;
2144 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2145 				xfs_btree_mark_sick(cur);
2146 				error = -EFSCORRUPTED;
2147 				goto done;
2148 			}
2149 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2150 				goto done;
2151 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2152 				xfs_btree_mark_sick(cur);
2153 				error = -EFSCORRUPTED;
2154 				goto done;
2155 			}
2156 			if ((error = xfs_btree_delete(cur, &i)))
2157 				goto done;
2158 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2159 				xfs_btree_mark_sick(cur);
2160 				error = -EFSCORRUPTED;
2161 				goto done;
2162 			}
2163 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2164 				goto done;
2165 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2166 				xfs_btree_mark_sick(cur);
2167 				error = -EFSCORRUPTED;
2168 				goto done;
2169 			}
2170 			error = xfs_bmbt_update(cur, &LEFT);
2171 			if (error)
2172 				goto done;
2173 		}
2174 		break;
2175 
2176 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2177 		/*
2178 		 * Setting all of a previous oldext extent to newext.
2179 		 * The left neighbor is contiguous, the right is not.
2180 		 */
2181 		LEFT.br_blockcount += PREV.br_blockcount;
2182 
2183 		xfs_iext_remove(ip, icur, state);
2184 		xfs_iext_prev(ifp, icur);
2185 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2186 		ifp->if_nextents--;
2187 		if (cur == NULL)
2188 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2189 		else {
2190 			rval = XFS_ILOG_CORE;
2191 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2192 			if (error)
2193 				goto done;
2194 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2195 				xfs_btree_mark_sick(cur);
2196 				error = -EFSCORRUPTED;
2197 				goto done;
2198 			}
2199 			if ((error = xfs_btree_delete(cur, &i)))
2200 				goto done;
2201 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2202 				xfs_btree_mark_sick(cur);
2203 				error = -EFSCORRUPTED;
2204 				goto done;
2205 			}
2206 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2207 				goto done;
2208 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2209 				xfs_btree_mark_sick(cur);
2210 				error = -EFSCORRUPTED;
2211 				goto done;
2212 			}
2213 			error = xfs_bmbt_update(cur, &LEFT);
2214 			if (error)
2215 				goto done;
2216 		}
2217 		break;
2218 
2219 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2220 		/*
2221 		 * Setting all of a previous oldext extent to newext.
2222 		 * The right neighbor is contiguous, the left is not.
2223 		 */
2224 		PREV.br_blockcount += RIGHT.br_blockcount;
2225 		PREV.br_state = new->br_state;
2226 
2227 		xfs_iext_next(ifp, icur);
2228 		xfs_iext_remove(ip, icur, state);
2229 		xfs_iext_prev(ifp, icur);
2230 		xfs_iext_update_extent(ip, state, icur, &PREV);
2231 		ifp->if_nextents--;
2232 
2233 		if (cur == NULL)
2234 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2235 		else {
2236 			rval = XFS_ILOG_CORE;
2237 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2238 			if (error)
2239 				goto done;
2240 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2241 				xfs_btree_mark_sick(cur);
2242 				error = -EFSCORRUPTED;
2243 				goto done;
2244 			}
2245 			if ((error = xfs_btree_delete(cur, &i)))
2246 				goto done;
2247 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2248 				xfs_btree_mark_sick(cur);
2249 				error = -EFSCORRUPTED;
2250 				goto done;
2251 			}
2252 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2253 				goto done;
2254 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2255 				xfs_btree_mark_sick(cur);
2256 				error = -EFSCORRUPTED;
2257 				goto done;
2258 			}
2259 			error = xfs_bmbt_update(cur, &PREV);
2260 			if (error)
2261 				goto done;
2262 		}
2263 		break;
2264 
2265 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2266 		/*
2267 		 * Setting all of a previous oldext extent to newext.
2268 		 * Neither the left nor right neighbors are contiguous with
2269 		 * the new one.
2270 		 */
2271 		PREV.br_state = new->br_state;
2272 		xfs_iext_update_extent(ip, state, icur, &PREV);
2273 
2274 		if (cur == NULL)
2275 			rval = XFS_ILOG_DEXT;
2276 		else {
2277 			rval = 0;
2278 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2279 			if (error)
2280 				goto done;
2281 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2282 				xfs_btree_mark_sick(cur);
2283 				error = -EFSCORRUPTED;
2284 				goto done;
2285 			}
2286 			error = xfs_bmbt_update(cur, &PREV);
2287 			if (error)
2288 				goto done;
2289 		}
2290 		break;
2291 
2292 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2293 		/*
2294 		 * Setting the first part of a previous oldext extent to newext.
2295 		 * The left neighbor is contiguous.
2296 		 */
2297 		LEFT.br_blockcount += new->br_blockcount;
2298 
2299 		old = PREV;
2300 		PREV.br_startoff += new->br_blockcount;
2301 		PREV.br_startblock += new->br_blockcount;
2302 		PREV.br_blockcount -= new->br_blockcount;
2303 
2304 		xfs_iext_update_extent(ip, state, icur, &PREV);
2305 		xfs_iext_prev(ifp, icur);
2306 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2307 
2308 		if (cur == NULL)
2309 			rval = XFS_ILOG_DEXT;
2310 		else {
2311 			rval = 0;
2312 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2313 			if (error)
2314 				goto done;
2315 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2316 				xfs_btree_mark_sick(cur);
2317 				error = -EFSCORRUPTED;
2318 				goto done;
2319 			}
2320 			error = xfs_bmbt_update(cur, &PREV);
2321 			if (error)
2322 				goto done;
2323 			error = xfs_btree_decrement(cur, 0, &i);
2324 			if (error)
2325 				goto done;
2326 			error = xfs_bmbt_update(cur, &LEFT);
2327 			if (error)
2328 				goto done;
2329 		}
2330 		break;
2331 
2332 	case BMAP_LEFT_FILLING:
2333 		/*
2334 		 * Setting the first part of a previous oldext extent to newext.
2335 		 * The left neighbor is not contiguous.
2336 		 */
2337 		old = PREV;
2338 		PREV.br_startoff += new->br_blockcount;
2339 		PREV.br_startblock += new->br_blockcount;
2340 		PREV.br_blockcount -= new->br_blockcount;
2341 
2342 		xfs_iext_update_extent(ip, state, icur, &PREV);
2343 		xfs_iext_insert(ip, icur, new, state);
2344 		ifp->if_nextents++;
2345 
2346 		if (cur == NULL)
2347 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2348 		else {
2349 			rval = XFS_ILOG_CORE;
2350 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2351 			if (error)
2352 				goto done;
2353 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2354 				xfs_btree_mark_sick(cur);
2355 				error = -EFSCORRUPTED;
2356 				goto done;
2357 			}
2358 			error = xfs_bmbt_update(cur, &PREV);
2359 			if (error)
2360 				goto done;
2361 			cur->bc_rec.b = *new;
2362 			if ((error = xfs_btree_insert(cur, &i)))
2363 				goto done;
2364 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2365 				xfs_btree_mark_sick(cur);
2366 				error = -EFSCORRUPTED;
2367 				goto done;
2368 			}
2369 		}
2370 		break;
2371 
2372 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2373 		/*
2374 		 * Setting the last part of a previous oldext extent to newext.
2375 		 * The right neighbor is contiguous with the new allocation.
2376 		 */
2377 		old = PREV;
2378 		PREV.br_blockcount -= new->br_blockcount;
2379 
2380 		RIGHT.br_startoff = new->br_startoff;
2381 		RIGHT.br_startblock = new->br_startblock;
2382 		RIGHT.br_blockcount += new->br_blockcount;
2383 
2384 		xfs_iext_update_extent(ip, state, icur, &PREV);
2385 		xfs_iext_next(ifp, icur);
2386 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
2387 
2388 		if (cur == NULL)
2389 			rval = XFS_ILOG_DEXT;
2390 		else {
2391 			rval = 0;
2392 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2393 			if (error)
2394 				goto done;
2395 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2396 				xfs_btree_mark_sick(cur);
2397 				error = -EFSCORRUPTED;
2398 				goto done;
2399 			}
2400 			error = xfs_bmbt_update(cur, &PREV);
2401 			if (error)
2402 				goto done;
2403 			error = xfs_btree_increment(cur, 0, &i);
2404 			if (error)
2405 				goto done;
2406 			error = xfs_bmbt_update(cur, &RIGHT);
2407 			if (error)
2408 				goto done;
2409 		}
2410 		break;
2411 
2412 	case BMAP_RIGHT_FILLING:
2413 		/*
2414 		 * Setting the last part of a previous oldext extent to newext.
2415 		 * The right neighbor is not contiguous.
2416 		 */
2417 		old = PREV;
2418 		PREV.br_blockcount -= new->br_blockcount;
2419 
2420 		xfs_iext_update_extent(ip, state, icur, &PREV);
2421 		xfs_iext_next(ifp, icur);
2422 		xfs_iext_insert(ip, icur, new, state);
2423 		ifp->if_nextents++;
2424 
2425 		if (cur == NULL)
2426 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2427 		else {
2428 			rval = XFS_ILOG_CORE;
2429 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2430 			if (error)
2431 				goto done;
2432 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2433 				xfs_btree_mark_sick(cur);
2434 				error = -EFSCORRUPTED;
2435 				goto done;
2436 			}
2437 			error = xfs_bmbt_update(cur, &PREV);
2438 			if (error)
2439 				goto done;
2440 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2441 			if (error)
2442 				goto done;
2443 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2444 				xfs_btree_mark_sick(cur);
2445 				error = -EFSCORRUPTED;
2446 				goto done;
2447 			}
2448 			if ((error = xfs_btree_insert(cur, &i)))
2449 				goto done;
2450 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2451 				xfs_btree_mark_sick(cur);
2452 				error = -EFSCORRUPTED;
2453 				goto done;
2454 			}
2455 		}
2456 		break;
2457 
2458 	case 0:
2459 		/*
2460 		 * Setting the middle part of a previous oldext extent to
2461 		 * newext.  Contiguity is impossible here.
2462 		 * One extent becomes three extents.
2463 		 */
2464 		old = PREV;
2465 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2466 
2467 		r[0] = *new;
2468 		r[1].br_startoff = new_endoff;
2469 		r[1].br_blockcount =
2470 			old.br_startoff + old.br_blockcount - new_endoff;
2471 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
2472 		r[1].br_state = PREV.br_state;
2473 
2474 		xfs_iext_update_extent(ip, state, icur, &PREV);
2475 		xfs_iext_next(ifp, icur);
2476 		xfs_iext_insert(ip, icur, &r[1], state);
2477 		xfs_iext_insert(ip, icur, &r[0], state);
2478 		ifp->if_nextents += 2;
2479 
2480 		if (cur == NULL)
2481 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2482 		else {
2483 			rval = XFS_ILOG_CORE;
2484 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2485 			if (error)
2486 				goto done;
2487 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2488 				xfs_btree_mark_sick(cur);
2489 				error = -EFSCORRUPTED;
2490 				goto done;
2491 			}
2492 			/* new right extent - oldext */
2493 			error = xfs_bmbt_update(cur, &r[1]);
2494 			if (error)
2495 				goto done;
2496 			/* new left extent - oldext */
2497 			cur->bc_rec.b = PREV;
2498 			if ((error = xfs_btree_insert(cur, &i)))
2499 				goto done;
2500 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2501 				xfs_btree_mark_sick(cur);
2502 				error = -EFSCORRUPTED;
2503 				goto done;
2504 			}
2505 			/*
2506 			 * Reset the cursor to the position of the new extent
2507 			 * we are about to insert as we can't trust it after
2508 			 * the previous insert.
2509 			 */
2510 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2511 			if (error)
2512 				goto done;
2513 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2514 				xfs_btree_mark_sick(cur);
2515 				error = -EFSCORRUPTED;
2516 				goto done;
2517 			}
2518 			/* new middle extent - newext */
2519 			if ((error = xfs_btree_insert(cur, &i)))
2520 				goto done;
2521 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2522 				xfs_btree_mark_sick(cur);
2523 				error = -EFSCORRUPTED;
2524 				goto done;
2525 			}
2526 		}
2527 		break;
2528 
2529 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2530 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2531 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2532 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2533 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2534 	case BMAP_LEFT_CONTIG:
2535 	case BMAP_RIGHT_CONTIG:
2536 		/*
2537 		 * These cases are all impossible.
2538 		 */
2539 		ASSERT(0);
2540 	}
2541 
2542 	/* update reverse mappings */
2543 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2544 
2545 	/* convert to a btree if necessary */
2546 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2547 		int	tmp_logflags;	/* partial log flag return val */
2548 
2549 		ASSERT(cur == NULL);
2550 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2551 				&tmp_logflags, whichfork);
2552 		*logflagsp |= tmp_logflags;
2553 		if (error)
2554 			goto done;
2555 	}
2556 
2557 	/* clear out the allocated field, done with it now in any case. */
2558 	if (cur) {
2559 		cur->bc_bmap.allocated = 0;
2560 		*curp = cur;
2561 	}
2562 
2563 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2564 done:
2565 	*logflagsp |= rval;
2566 	return error;
2567 #undef	LEFT
2568 #undef	RIGHT
2569 #undef	PREV
2570 }
2571 
2572 /*
2573  * Convert a hole to a real allocation.
2574  */
2575 STATIC int				/* error */
2576 xfs_bmap_add_extent_hole_real(
2577 	struct xfs_trans	*tp,
2578 	struct xfs_inode	*ip,
2579 	int			whichfork,
2580 	struct xfs_iext_cursor	*icur,
2581 	struct xfs_btree_cur	**curp,
2582 	struct xfs_bmbt_irec	*new,
2583 	int			*logflagsp,
2584 	uint32_t		flags)
2585 {
2586 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
2587 	struct xfs_mount	*mp = ip->i_mount;
2588 	struct xfs_btree_cur	*cur = *curp;
2589 	int			error;	/* error return value */
2590 	int			i;	/* temp state */
2591 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2592 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2593 	int			rval=0;	/* return value (logging flags) */
2594 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2595 	struct xfs_bmbt_irec	old;
2596 
2597 	ASSERT(!isnullstartblock(new->br_startblock));
2598 	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
2599 
2600 	XFS_STATS_INC(mp, xs_add_exlist);
2601 
2602 	/*
2603 	 * Check and set flags if this segment has a left neighbor.
2604 	 */
2605 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2606 		state |= BMAP_LEFT_VALID;
2607 		if (isnullstartblock(left.br_startblock))
2608 			state |= BMAP_LEFT_DELAY;
2609 	}
2610 
2611 	/*
2612 	 * Check and set flags if this segment has a current value.
2613 	 * Not true if we're inserting into the "hole" at eof.
2614 	 */
2615 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2616 		state |= BMAP_RIGHT_VALID;
2617 		if (isnullstartblock(right.br_startblock))
2618 			state |= BMAP_RIGHT_DELAY;
2619 	}
2620 
2621 	/*
2622 	 * We're inserting a real allocation between "left" and "right".
2623 	 * Set the contiguity flags.  Don't let extents get too large.
2624 	 */
2625 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2626 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2627 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
2628 	    left.br_state == new->br_state &&
2629 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2630 	    xfs_bmap_same_rtgroup(ip, whichfork, &left, new))
2631 		state |= BMAP_LEFT_CONTIG;
2632 
2633 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2634 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2635 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
2636 	    new->br_state == right.br_state &&
2637 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2638 	    (!(state & BMAP_LEFT_CONTIG) ||
2639 	     left.br_blockcount + new->br_blockcount +
2640 	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN) &&
2641 	    xfs_bmap_same_rtgroup(ip, whichfork, new, &right))
2642 		state |= BMAP_RIGHT_CONTIG;
2643 
2644 	error = 0;
2645 	/*
2646 	 * Select which case we're in here, and implement it.
2647 	 */
2648 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2649 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2650 		/*
2651 		 * New allocation is contiguous with real allocations on the
2652 		 * left and on the right.
2653 		 * Merge all three into a single extent record.
2654 		 */
2655 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
2656 
2657 		xfs_iext_remove(ip, icur, state);
2658 		xfs_iext_prev(ifp, icur);
2659 		xfs_iext_update_extent(ip, state, icur, &left);
2660 		ifp->if_nextents--;
2661 
2662 		if (cur == NULL) {
2663 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2664 		} else {
2665 			rval = XFS_ILOG_CORE;
2666 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
2667 			if (error)
2668 				goto done;
2669 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2670 				xfs_btree_mark_sick(cur);
2671 				error = -EFSCORRUPTED;
2672 				goto done;
2673 			}
2674 			error = xfs_btree_delete(cur, &i);
2675 			if (error)
2676 				goto done;
2677 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2678 				xfs_btree_mark_sick(cur);
2679 				error = -EFSCORRUPTED;
2680 				goto done;
2681 			}
2682 			error = xfs_btree_decrement(cur, 0, &i);
2683 			if (error)
2684 				goto done;
2685 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2686 				xfs_btree_mark_sick(cur);
2687 				error = -EFSCORRUPTED;
2688 				goto done;
2689 			}
2690 			error = xfs_bmbt_update(cur, &left);
2691 			if (error)
2692 				goto done;
2693 		}
2694 		break;
2695 
2696 	case BMAP_LEFT_CONTIG:
2697 		/*
2698 		 * New allocation is contiguous with a real allocation
2699 		 * on the left.
2700 		 * Merge the new allocation with the left neighbor.
2701 		 */
2702 		old = left;
2703 		left.br_blockcount += new->br_blockcount;
2704 
2705 		xfs_iext_prev(ifp, icur);
2706 		xfs_iext_update_extent(ip, state, icur, &left);
2707 
2708 		if (cur == NULL) {
2709 			rval = xfs_ilog_fext(whichfork);
2710 		} else {
2711 			rval = 0;
2712 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2713 			if (error)
2714 				goto done;
2715 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2716 				xfs_btree_mark_sick(cur);
2717 				error = -EFSCORRUPTED;
2718 				goto done;
2719 			}
2720 			error = xfs_bmbt_update(cur, &left);
2721 			if (error)
2722 				goto done;
2723 		}
2724 		break;
2725 
2726 	case BMAP_RIGHT_CONTIG:
2727 		/*
2728 		 * New allocation is contiguous with a real allocation
2729 		 * on the right.
2730 		 * Merge the new allocation with the right neighbor.
2731 		 */
2732 		old = right;
2733 
2734 		right.br_startoff = new->br_startoff;
2735 		right.br_startblock = new->br_startblock;
2736 		right.br_blockcount += new->br_blockcount;
2737 		xfs_iext_update_extent(ip, state, icur, &right);
2738 
2739 		if (cur == NULL) {
2740 			rval = xfs_ilog_fext(whichfork);
2741 		} else {
2742 			rval = 0;
2743 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2744 			if (error)
2745 				goto done;
2746 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2747 				xfs_btree_mark_sick(cur);
2748 				error = -EFSCORRUPTED;
2749 				goto done;
2750 			}
2751 			error = xfs_bmbt_update(cur, &right);
2752 			if (error)
2753 				goto done;
2754 		}
2755 		break;
2756 
2757 	case 0:
2758 		/*
2759 		 * New allocation is not contiguous with another
2760 		 * real allocation.
2761 		 * Insert a new entry.
2762 		 */
2763 		xfs_iext_insert(ip, icur, new, state);
2764 		ifp->if_nextents++;
2765 
2766 		if (cur == NULL) {
2767 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2768 		} else {
2769 			rval = XFS_ILOG_CORE;
2770 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2771 			if (error)
2772 				goto done;
2773 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2774 				xfs_btree_mark_sick(cur);
2775 				error = -EFSCORRUPTED;
2776 				goto done;
2777 			}
2778 			error = xfs_btree_insert(cur, &i);
2779 			if (error)
2780 				goto done;
2781 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2782 				xfs_btree_mark_sick(cur);
2783 				error = -EFSCORRUPTED;
2784 				goto done;
2785 			}
2786 		}
2787 		break;
2788 	}
2789 
2790 	/* add reverse mapping unless caller opted out */
2791 	if (!(flags & XFS_BMAPI_NORMAP))
2792 		xfs_rmap_map_extent(tp, ip, whichfork, new);
2793 
2794 	/* convert to a btree if necessary */
2795 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2796 		int	tmp_logflags;	/* partial log flag return val */
2797 
2798 		ASSERT(cur == NULL);
2799 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2800 				&tmp_logflags, whichfork);
2801 		*logflagsp |= tmp_logflags;
2802 		cur = *curp;
2803 		if (error)
2804 			goto done;
2805 	}
2806 
2807 	/* clear out the allocated field, done with it now in any case. */
2808 	if (cur)
2809 		cur->bc_bmap.allocated = 0;
2810 
2811 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2812 done:
2813 	*logflagsp |= rval;
2814 	return error;
2815 }
2816 
2817 /*
2818  * Functions used in the extent read, allocate and remove paths
2819  */
2820 
2821 /*
2822  * Adjust the size of the new extent based on i_extsize and rt extsize.
2823  */
2824 int
2825 xfs_bmap_extsize_align(
2826 	xfs_mount_t	*mp,
2827 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
2828 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
2829 	xfs_extlen_t	extsz,		/* align to this extent size */
2830 	int		rt,		/* is this a realtime inode? */
2831 	int		eof,		/* is extent at end-of-file? */
2832 	int		delay,		/* creating delalloc extent? */
2833 	int		convert,	/* overwriting unwritten extent? */
2834 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
2835 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
2836 {
2837 	xfs_fileoff_t	orig_off;	/* original offset */
2838 	xfs_extlen_t	orig_alen;	/* original length */
2839 	xfs_fileoff_t	orig_end;	/* original off+len */
2840 	xfs_fileoff_t	nexto;		/* next file offset */
2841 	xfs_fileoff_t	prevo;		/* previous file offset */
2842 	xfs_fileoff_t	align_off;	/* temp for offset */
2843 	xfs_extlen_t	align_alen;	/* temp for length */
2844 	xfs_extlen_t	temp;		/* temp for calculations */
2845 
2846 	if (convert)
2847 		return 0;
2848 
2849 	orig_off = align_off = *offp;
2850 	orig_alen = align_alen = *lenp;
2851 	orig_end = orig_off + orig_alen;
2852 
2853 	/*
2854 	 * If this request overlaps an existing extent, then don't
2855 	 * attempt to perform any additional alignment.
2856 	 */
2857 	if (!delay && !eof &&
2858 	    (orig_off >= gotp->br_startoff) &&
2859 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2860 		return 0;
2861 	}
2862 
2863 	/*
2864 	 * If the file offset is unaligned vs. the extent size
2865 	 * we need to align it.  This will be possible unless
2866 	 * the file was previously written with a kernel that didn't
2867 	 * perform this alignment, or if a truncate shot us in the
2868 	 * foot.
2869 	 */
2870 	div_u64_rem(orig_off, extsz, &temp);
2871 	if (temp) {
2872 		align_alen += temp;
2873 		align_off -= temp;
2874 	}
2875 
2876 	/* Same adjustment for the end of the requested area. */
2877 	temp = (align_alen % extsz);
2878 	if (temp)
2879 		align_alen += extsz - temp;
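	/*
	 * Hypothetical example: with extsz = 16, *offp = 21 and *lenp = 8, the
	 * head adjustment above gives align_off = 16 and align_alen = 13, and
	 * the tail adjustment rounds align_alen up to 16, so the aligned
	 * request [16, 32) fully covers the original range [21, 29).
	 */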
2880 
2881 	/*
2882 	 * For large extent hint sizes, the aligned extent might be larger than
2883 	 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
2884 	 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
2885 	 * allocation loops handle short allocation just fine, so it is safe to
2886 	 * do this. We only want to do it when we are forced to, though, because
2887 	 * it means more allocation operations are required.
2888 	 */
2889 	while (align_alen > XFS_MAX_BMBT_EXTLEN)
2890 		align_alen -= extsz;
2891 	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
2892 
2893 	/*
2894 	 * If the previous block overlaps with this proposed allocation
2895 	 * then move the start forward without adjusting the length.
2896 	 */
2897 	if (prevp->br_startoff != NULLFILEOFF) {
2898 		if (prevp->br_startblock == HOLESTARTBLOCK)
2899 			prevo = prevp->br_startoff;
2900 		else
2901 			prevo = prevp->br_startoff + prevp->br_blockcount;
2902 	} else
2903 		prevo = 0;
2904 	if (align_off != orig_off && align_off < prevo)
2905 		align_off = prevo;
2906 	/*
2907 	 * If the next block overlaps with this proposed allocation
2908 	 * then move the start back without adjusting the length,
2909 	 * but not before offset 0.
2910 	 * This may of course make the start overlap the previous block,
2911 	 * and if we hit the offset 0 limit then the next block
2912 	 * can still overlap too.
2913 	 */
2914 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
2915 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2916 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2917 			nexto = gotp->br_startoff + gotp->br_blockcount;
2918 		else
2919 			nexto = gotp->br_startoff;
2920 	} else
2921 		nexto = NULLFILEOFF;
2922 	if (!eof &&
2923 	    align_off + align_alen != orig_end &&
2924 	    align_off + align_alen > nexto)
2925 		align_off = nexto > align_alen ? nexto - align_alen : 0;
2926 	/*
2927 	 * If we're now overlapping the next or previous extent that
2928 	 * means we can't fit an extsz piece in this hole.  Just move
2929 	 * the start forward to the first valid spot and set
2930 	 * the length so we hit the end.
2931 	 */
2932 	if (align_off != orig_off && align_off < prevo)
2933 		align_off = prevo;
2934 	if (align_off + align_alen != orig_end &&
2935 	    align_off + align_alen > nexto &&
2936 	    nexto != NULLFILEOFF) {
2937 		ASSERT(nexto > prevo);
2938 		align_alen = nexto - align_off;
2939 	}
2940 
2941 	/*
2942 	 * If realtime, and the result isn't a multiple of the realtime
2943 	 * extent size we need to remove blocks until it is.
2944 	 */
2945 	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
2946 		/*
2947 		 * We're not covering the original request, or
2948 		 * we won't be able to once we fix the length.
2949 		 */
2950 		if (orig_off < align_off ||
2951 		    orig_end > align_off + align_alen ||
2952 		    align_alen - temp < orig_alen)
2953 			return -EINVAL;
2954 		/*
2955 		 * Try to fix it by moving the start up.
2956 		 */
2957 		if (align_off + temp <= orig_off) {
2958 			align_alen -= temp;
2959 			align_off += temp;
2960 		}
2961 		/*
2962 		 * Try to fix it by moving the end in.
2963 		 */
2964 		else if (align_off + align_alen - temp >= orig_end)
2965 			align_alen -= temp;
2966 		/*
2967 		 * Set the start to the minimum then trim the length.
2968 		 */
2969 		else {
2970 			align_alen -= orig_off - align_off;
2971 			align_off = orig_off;
2972 			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
2973 		}
2974 		/*
2975 		 * Result doesn't cover the request, fail it.
2976 		 */
2977 		if (orig_off < align_off || orig_end > align_off + align_alen)
2978 			return -EINVAL;
2979 	} else {
2980 		ASSERT(orig_off >= align_off);
2981 		/* see XFS_MAX_BMBT_EXTLEN handling above */
2982 		ASSERT(orig_end <= align_off + align_alen ||
2983 		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
2984 	}
2985 
2986 #ifdef DEBUG
2987 	if (!eof && gotp->br_startoff != NULLFILEOFF)
2988 		ASSERT(align_off + align_alen <= gotp->br_startoff);
2989 	if (prevp->br_startoff != NULLFILEOFF)
2990 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2991 #endif
2992 
2993 	*lenp = align_alen;
2994 	*offp = align_off;
2995 	return 0;
2996 }
2997 
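/*
 * Decide whether candidate block x can serve as a locality hint derived from
 * block y: x must be addressable on the device and, where the filesystem is
 * divided into allocation groups or realtime groups, it must stay in the same
 * group as y.
 */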
2998 static inline bool
2999 xfs_bmap_adjacent_valid(
3000 	struct xfs_bmalloca	*ap,
3001 	xfs_fsblock_t		x,
3002 	xfs_fsblock_t		y)
3003 {
3004 	struct xfs_mount	*mp = ap->ip->i_mount;
3005 
3006 	if (XFS_IS_REALTIME_INODE(ap->ip) &&
3007 	    (ap->datatype & XFS_ALLOC_USERDATA)) {
3008 		if (!xfs_has_rtgroups(mp))
3009 			return x < mp->m_sb.sb_rblocks;
3010 
3011 		return xfs_rtb_to_rgno(mp, x) == xfs_rtb_to_rgno(mp, y) &&
3012 			xfs_rtb_to_rgno(mp, x) < mp->m_sb.sb_rgcount &&
3013 			xfs_rtb_to_rtx(mp, x) < mp->m_sb.sb_rgextents;
3014 
3015 	}
3016 
3017 	return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
3018 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
3019 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
3020 }
3021 
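/*
 * Heuristic limit used below: a gap in the file between the allocation and a
 * neighbouring extent is only mirrored on disk when it is at most
 * XFS_ALLOC_GAP_UNITS times the length being allocated; larger gaps just aim
 * for a block adjacent to the neighbour instead.
 */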
3022 #define XFS_ALLOC_GAP_UNITS	4
3023 
3024 /* returns true if ap->blkno was modified */
3025 bool
3026 xfs_bmap_adjacent(
3027 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
3028 {
3029 	xfs_fsblock_t		adjust;		/* adjustment to block numbers */
3030 
3031 	/*
3032 	 * If allocating at eof, and there's a previous real block,
3033 	 * try to use its last block as our starting point.
3034 	 */
3035 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3036 	    !isnullstartblock(ap->prev.br_startblock) &&
3037 	    xfs_bmap_adjacent_valid(ap,
3038 			ap->prev.br_startblock + ap->prev.br_blockcount,
3039 			ap->prev.br_startblock)) {
3040 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3041 		/*
3042 		 * Adjust for the gap between prevp and us.
3043 		 */
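		/*
		 * Hypothetical example: the previous extent maps file offsets
		 * [0, 8) to blocks [100, 108) and we are allocating at offset
		 * 10; blkno starts at 108 and the two-block hole in the file
		 * pushes the hint to 110, keeping file and disk layout in
		 * step.
		 */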
3044 		adjust = ap->offset -
3045 			(ap->prev.br_startoff + ap->prev.br_blockcount);
3046 		if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust,
3047 				ap->prev.br_startblock))
3048 			ap->blkno += adjust;
3049 		return true;
3050 	}
3051 	/*
3052 	 * If not at eof, then compare the two neighbor blocks.
3053 	 * Figure out whether either one gives us a good starting point,
3054 	 * and pick the better one.
3055 	 */
3056 	if (!ap->eof) {
3057 		xfs_fsblock_t	gotbno;		/* right side block number */
3058 		xfs_fsblock_t	gotdiff=0;	/* right side difference */
3059 		xfs_fsblock_t	prevbno;	/* left side block number */
3060 		xfs_fsblock_t	prevdiff=0;	/* left side difference */
3061 
3062 		/*
3063 		 * If there's a previous (left) block, select a requested
3064 		 * start block based on it.
3065 		 */
3066 		if (ap->prev.br_startoff != NULLFILEOFF &&
3067 		    !isnullstartblock(ap->prev.br_startblock) &&
3068 		    (prevbno = ap->prev.br_startblock +
3069 			       ap->prev.br_blockcount) &&
3070 		    xfs_bmap_adjacent_valid(ap, prevbno,
3071 				ap->prev.br_startblock)) {
3072 			/*
3073 			 * Calculate gap to end of previous block.
3074 			 */
3075 			adjust = prevdiff = ap->offset -
3076 				(ap->prev.br_startoff +
3077 				 ap->prev.br_blockcount);
3078 			/*
3079 			 * Figure the startblock based on the previous block's
3080 			 * end and the gap size.
3081 			 * Heuristic!
3082 			 * If the gap is large relative to the piece we're
3083 			 * allocating, or using it gives us an invalid block
3084 			 * number, then just use the end of the previous block.
3085 			 */
3086 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3087 			    xfs_bmap_adjacent_valid(ap, prevbno + prevdiff,
3088 					ap->prev.br_startblock))
3089 				prevbno += adjust;
3090 			else
3091 				prevdiff += adjust;
3092 		}
3093 		/*
3094 		 * No previous block or can't follow it, just default.
3095 		 */
3096 		else
3097 			prevbno = NULLFSBLOCK;
3098 		/*
3099 		 * If there's a following (right) block, select a requested
3100 		 * start block based on it.
3101 		 */
3102 		if (!isnullstartblock(ap->got.br_startblock)) {
3103 			/*
3104 			 * Calculate gap to start of next block.
3105 			 */
3106 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
3107 			/*
3108 			 * Figure the startblock based on the next block's
3109 			 * start and the gap size.
3110 			 */
3111 			gotbno = ap->got.br_startblock;
3112 			/*
3113 			 * Heuristic!
3114 			 * If the gap is large relative to the piece we're
3115 			 * allocating, or using it gives us an invalid block
3116 			 * number, then just use the start of the next block
3117 			 * offset by our length.
3118 			 */
3119 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3120 			    xfs_bmap_adjacent_valid(ap, gotbno - gotdiff,
3121 					gotbno))
3122 				gotbno -= adjust;
3123 			else if (xfs_bmap_adjacent_valid(ap, gotbno - ap->length,
3124 					gotbno)) {
3125 				gotbno -= ap->length;
3126 				gotdiff += adjust - ap->length;
3127 			} else
3128 				gotdiff += adjust;
3129 		}
3130 		/*
3131 		 * No next block, just default.
3132 		 */
3133 		else
3134 			gotbno = NULLFSBLOCK;
3135 		/*
3136 		 * If both valid, pick the better one, else the only good
3137 		 * one, else ap->blkno is already set (to 0 or the inode block).
3138 		 */
3139 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3140 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3141 			return true;
3142 		}
3143 		if (prevbno != NULLFSBLOCK) {
3144 			ap->blkno = prevbno;
3145 			return true;
3146 		}
3147 		if (gotbno != NULLFSBLOCK) {
3148 			ap->blkno = gotbno;
3149 			return true;
3150 		}
3151 	}
3152 
3153 	return false;
3154 }
3155 
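/*
 * Raise *blen to the longest free extent available in this AG.  The AGF is
 * read, if necessary, with a trylock, so a busy group returns -EAGAIN rather
 * than stalling the caller's scan.
 */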
3156 int
3157 xfs_bmap_longest_free_extent(
3158 	struct xfs_perag	*pag,
3159 	struct xfs_trans	*tp,
3160 	xfs_extlen_t		*blen)
3161 {
3162 	xfs_extlen_t		longest;
3163 	int			error = 0;
3164 
3165 	if (!xfs_perag_initialised_agf(pag)) {
3166 		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3167 				NULL);
3168 		if (error)
3169 			return error;
3170 	}
3171 
3172 	longest = xfs_alloc_longest_free_extent(pag,
3173 				xfs_alloc_min_freelist(pag_mount(pag), pag),
3174 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3175 	if (*blen < longest)
3176 		*blen = longest;
3177 
3178 	return 0;
3179 }
3180 
3181 static xfs_extlen_t
3182 xfs_bmap_select_minlen(
3183 	struct xfs_bmalloca	*ap,
3184 	struct xfs_alloc_arg	*args,
3185 	xfs_extlen_t		blen)
3186 {
3187 
3188 	/*
3189 	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), busy AGs
3190 	 * may have been skipped, so a small blen can still hide enough free space.
3191 	 */
3192 	if (blen < ap->minlen)
3193 		return ap->minlen;
3194 
3195 	/*
3196 	 * If the best seen length is less than the request length,
3197 	 * use the best as the minimum, otherwise we've got the maxlen we
3198 	 * were asked for.
3199 	 */
3200 	if (blen < args->maxlen)
3201 		return blen;
3202 	return args->maxlen;
3203 }
3204 
3205 static int
3206 xfs_bmap_btalloc_select_lengths(
3207 	struct xfs_bmalloca	*ap,
3208 	struct xfs_alloc_arg	*args,
3209 	xfs_extlen_t		*blen)
3210 {
3211 	struct xfs_mount	*mp = args->mp;
3212 	struct xfs_perag	*pag;
3213 	xfs_agnumber_t		agno, startag;
3214 	int			error = 0;
3215 
3216 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3217 		args->total = ap->minlen;
3218 		args->minlen = ap->minlen;
3219 		return 0;
3220 	}
3221 
3222 	args->total = ap->total;
3223 	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3224 	if (startag == NULLAGNUMBER)
3225 		startag = 0;
3226 
3227 	*blen = 0;
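	/*
	 * Scan the AGs, starting at the hinted one and wrapping, for the
	 * longest free extent.  A busy AGF returns -EAGAIN and is simply
	 * skipped, and the scan stops early once *blen already satisfies
	 * maxlen.
	 */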
3228 	for_each_perag_wrap(mp, startag, agno, pag) {
3229 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3230 		if (error && error != -EAGAIN)
3231 			break;
3232 		error = 0;
3233 		if (*blen >= args->maxlen)
3234 			break;
3235 	}
3236 	if (pag)
3237 		xfs_perag_rele(pag);
3238 
3239 	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3240 	return error;
3241 }
3242 
3243 /* Update all inode and quota accounting for the allocation we just did. */
3244 void
3245 xfs_bmap_alloc_account(
3246 	struct xfs_bmalloca	*ap)
3247 {
3248 	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3249 					!(ap->flags & XFS_BMAPI_ATTRFORK);
3250 	uint			fld;
3251 
3252 	if (ap->flags & XFS_BMAPI_COWFORK) {
3253 		/*
3254 		 * COW fork blocks are in-core only and thus are treated as
3255 		 * in-core quota reservation (like delalloc blocks) even when
3256 		 * converted to real blocks. The quota reservation is not
3257 		 * accounted to disk until blocks are remapped to the data
3258 		 * fork. So if these blocks were previously delalloc, we
3259 		 * already have quota reservation and there's nothing to do
3260 		 * yet.
3261 		 */
3262 		if (ap->wasdel) {
3263 			xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3264 			return;
3265 		}
3266 
3267 		/*
3268 		 * Otherwise, we've allocated blocks in a hole. The transaction
3269 		 * has acquired in-core quota reservation for this extent.
3270 		 * Rather than account these as real blocks, however, we reduce
3271 		 * the transaction quota reservation based on the allocation.
3272 		 * This essentially transfers the transaction quota reservation
3273 		 * to that of a delalloc extent.
3274 		 */
3275 		ap->ip->i_delayed_blks += ap->length;
3276 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3277 				XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3278 				-(long)ap->length);
3279 		return;
3280 	}
3281 
3282 	/* data/attr fork only */
3283 	ap->ip->i_nblocks += ap->length;
3284 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3285 	if (ap->wasdel) {
3286 		ap->ip->i_delayed_blks -= ap->length;
3287 		xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3288 		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3289 	} else {
3290 		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3291 	}
3292 
3293 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3294 }
3295 
3296 static int
3297 xfs_bmap_compute_alignments(
3298 	struct xfs_bmalloca	*ap,
3299 	struct xfs_alloc_arg	*args)
3300 {
3301 	struct xfs_mount	*mp = args->mp;
3302 	xfs_extlen_t		align = 0; /* minimum allocation alignment */
3303 	int			stripe_align = 0;
3304 
3305 	/* stripe alignment for allocation is determined by mount parameters */
3306 	if (mp->m_swidth && xfs_has_swalloc(mp))
3307 		stripe_align = mp->m_swidth;
3308 	else if (mp->m_dalign)
3309 		stripe_align = mp->m_dalign;
3310 
3311 	if (ap->flags & XFS_BMAPI_COWFORK)
3312 		align = xfs_get_cowextsz_hint(ap->ip);
3313 	else if (ap->datatype & XFS_ALLOC_USERDATA)
3314 		align = xfs_get_extsz_hint(ap->ip);
3315 	if (align) {
3316 		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3317 					ap->eof, 0, ap->conv, &ap->offset,
3318 					&ap->length))
3319 			ASSERT(0);
3320 		ASSERT(ap->length);
3321 	}
3322 
3323 	/* apply extent size hints if obtained earlier */
3324 	if (align) {
3325 		args->prod = align;
3326 		div_u64_rem(ap->offset, args->prod, &args->mod);
3327 		if (args->mod)
3328 			args->mod = args->prod - args->mod;
3329 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3330 		args->prod = 1;
3331 		args->mod = 0;
3332 	} else {
3333 		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3334 		div_u64_rem(ap->offset, args->prod, &args->mod);
3335 		if (args->mod)
3336 			args->mod = args->prod - args->mod;
3337 	}
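	/*
	 * A hypothetical example, assuming the allocator trims the length so
	 * that len % prod == mod: align = 4 and ap->offset = 10 give prod = 4
	 * and mod = 2, letting the allocation be trimmed so that offset plus
	 * length ends on a four-block boundary.
	 */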
3338 
3339 	return stripe_align;
3340 }
3341 
3342 static void
3343 xfs_bmap_process_allocated_extent(
3344 	struct xfs_bmalloca	*ap,
3345 	struct xfs_alloc_arg	*args,
3346 	xfs_fileoff_t		orig_offset,
3347 	xfs_extlen_t		orig_length)
3348 {
3349 	ap->blkno = args->fsbno;
3350 	ap->length = args->len;
3351 	/*
3352 	 * If the extent size hint is active, we tried to round the
3353 	 * caller's allocation request offset down to extsz and the
3354 	 * length up to another extsz boundary.  If we found a free
3355 	 * extent we mapped it in starting at this new offset.  If the
3356 	 * newly mapped space isn't long enough to cover any of the
3357 	 * range of offsets that was originally requested, move the
3358 	 * mapping up so that we can fill as much of the caller's
3359 	 * original request as possible.  Free space is apparently
3360 	 * very fragmented so we're unlikely to be able to satisfy the
3361 	 * hints anyway.
3362 	 */
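	/*
	 * Hypothetical example: the caller asked for [100, 110) and the hint
	 * was rounded down to offset 96.  A 12-block allocation would map
	 * [96, 108), ending short of 110, so the mapping is moved up to start
	 * at 110 - 12 = 98 and now covers the whole original request.
	 */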
3363 	if (ap->length <= orig_length)
3364 		ap->offset = orig_offset;
3365 	else if (ap->offset + ap->length < orig_offset + orig_length)
3366 		ap->offset = orig_offset + orig_length - ap->length;
3367 	xfs_bmap_alloc_account(ap);
3368 }
3369 
3370 static int
3371 xfs_bmap_exact_minlen_extent_alloc(
3372 	struct xfs_bmalloca	*ap,
3373 	struct xfs_alloc_arg	*args)
3374 {
3375 	if (ap->minlen != 1) {
3376 		args->fsbno = NULLFSBLOCK;
3377 		return 0;
3378 	}
3379 
3380 	args->alloc_minlen_only = 1;
3381 	args->minlen = args->maxlen = ap->minlen;
3382 	args->total = ap->total;
3383 
3384 	/*
3385 	 * Unlike the longest extent available in an AG, we don't track
3386 	 * the length of an AG's shortest extent.
3387 	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3388 	 * hence we can afford to start traversing from the 0th AG since
3389 	 * we need not be concerned about a drop in performance in
3390 	 * "debug only" code paths.
3391 	 */
3392 	ap->blkno = XFS_AGB_TO_FSB(ap->ip->i_mount, 0, 0);
3393 
3394 	/*
3395 	 * Call xfs_bmap_btalloc_low_space here as it first does a "normal" AG
3396 	 * iteration and then drops args->total to args->minlen, which might be
3397 	 * required to find an allocation for the transaction reservation when
3398 	 * the file system is very full.
3399 	 */
3400 	return xfs_bmap_btalloc_low_space(ap, args);
3401 }
3402 
3403 /*
3404  * If we are not low on available data blocks and we are allocating at
3405  * EOF, optimise allocation for contiguous file extension and/or stripe
3406  * alignment of the new extent.
3407  *
3408  * NOTE: ap->aeof is only set if the allocation length is >= the
3409  * stripe unit and the allocation offset is at the end of file.
3410  */
3411 static int
3412 xfs_bmap_btalloc_at_eof(
3413 	struct xfs_bmalloca	*ap,
3414 	struct xfs_alloc_arg	*args,
3415 	xfs_extlen_t		blen,
3416 	int			stripe_align,
3417 	bool			ag_only)
3418 {
3419 	struct xfs_mount	*mp = args->mp;
3420 	struct xfs_perag	*caller_pag = args->pag;
3421 	int			error;
3422 
3423 	/*
3424 	 * If there are already extents in the file, and xfs_bmap_adjacent() has
3425 	 * given a better blkno, try an exact EOF block allocation to extend the
3426 	 * file as a contiguous extent. If that fails, or it's the first
3427 	 * allocation in a file, just try for a stripe aligned allocation.
3428 	 */
3429 	if (ap->eof) {
3430 		xfs_extlen_t	nextminlen = 0;
3431 
3432 		/*
3433 		 * Compute the minlen+alignment for the next case.  Set slop so
3434 		 * that the value of minlen+alignment+slop doesn't go up between
3435 		 * the calls.
3436 		 */
3437 		args->alignment = 1;
3438 		if (blen > stripe_align && blen <= args->maxlen)
3439 			nextminlen = blen - stripe_align;
3440 		else
3441 			nextminlen = args->minlen;
3442 		if (nextminlen + stripe_align > args->minlen + 1)
3443 			args->minalignslop = nextminlen + stripe_align -
3444 					args->minlen - 1;
3445 		else
3446 			args->minalignslop = 0;
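		/*
		 * Carrying this slop makes the space requirement of the
		 * exact-bno attempt (minlen + alignment + minalignslop) cover
		 * that of the aligned retry below (nextminlen + stripe_align),
		 * so the fallback never asks for more space than we have
		 * already checked for here.
		 */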
3447 
3448 		if (!caller_pag)
3449 			args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3450 		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3451 		if (!caller_pag) {
3452 			xfs_perag_put(args->pag);
3453 			args->pag = NULL;
3454 		}
3455 		if (error)
3456 			return error;
3457 
3458 		if (args->fsbno != NULLFSBLOCK)
3459 			return 0;
3460 		/*
3461 		 * Exact allocation failed. Reset to try an aligned allocation
3462 		 * according to the original allocation specification.
3463 		 */
3464 		args->alignment = stripe_align;
3465 		args->minlen = nextminlen;
3466 		args->minalignslop = 0;
3467 	} else {
3468 		/*
3469 		 * Adjust minlen to try and preserve alignment if we
3470 		 * can't guarantee an aligned maxlen extent.
3471 		 */
3472 		args->alignment = stripe_align;
3473 		if (blen > args->alignment &&
3474 		    blen <= args->maxlen + args->alignment)
3475 			args->minlen = blen - args->alignment;
3476 		args->minalignslop = 0;
3477 	}
3478 
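	/*
	 * Try the (possibly stripe aligned) allocation: restricted to the
	 * current AG for filestreams callers, otherwise starting at the target
	 * AG and scanning the rest of the filesystem if necessary.
	 */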
3479 	if (ag_only) {
3480 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3481 	} else {
3482 		args->pag = NULL;
3483 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3484 		ASSERT(args->pag == NULL);
3485 		args->pag = caller_pag;
3486 	}
3487 	if (error)
3488 		return error;
3489 
3490 	if (args->fsbno != NULLFSBLOCK)
3491 		return 0;
3492 
3493 	/*
3494 	 * Allocation failed, so return the allocation args to their
3495 	 * original non-aligned state so the caller can proceed on allocation
3496 	 * failure as if this function was never called.
3497 	 */
3498 	args->alignment = 1;
3499 	return 0;
3500 }
3501 
3502 /*
3503  * We have failed multiple allocation attempts, so we are now in a low space
3504  * allocation situation. Try a locality-first, full filesystem, minimum length
3505  * allocation whilst still maintaining the necessary total block reservation
3506  * requirements.
3507  *
3508  * If that fails, we are now critically low on space, so perform a last resort
3509  * allocation attempt: no reserve, no locality, blocking, minimum length, full
3510  * filesystem free space scan. We also indicate to future allocations in this
3511  * transaction that we are critically low on space so they don't waste time on
3512  * allocation modes that are unlikely to succeed.
3513  */
3514 int
3515 xfs_bmap_btalloc_low_space(
3516 	struct xfs_bmalloca	*ap,
3517 	struct xfs_alloc_arg	*args)
3518 {
3519 	int			error;
3520 
3521 	if (args->minlen > ap->minlen) {
3522 		args->minlen = ap->minlen;
3523 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3524 		if (error || args->fsbno != NULLFSBLOCK)
3525 			return error;
3526 	}
3527 
3528 	/* Last ditch attempt before failure is declared. */
3529 	args->total = ap->minlen;
3530 	error = xfs_alloc_vextent_first_ag(args, 0);
3531 	if (error)
3532 		return error;
3533 	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3534 	return 0;
3535 }
3536 
3537 static int
3538 xfs_bmap_btalloc_filestreams(
3539 	struct xfs_bmalloca	*ap,
3540 	struct xfs_alloc_arg	*args,
3541 	int			stripe_align)
3542 {
3543 	xfs_extlen_t		blen = 0;
3544 	int			error = 0;
3545 
3546 
3547 	error = xfs_filestream_select_ag(ap, args, &blen);
3548 	if (error)
3549 		return error;
3550 	ASSERT(args->pag);
3551 
3552 	/*
3553 	 * If we are in low space mode, then optimal allocation will fail so
3554 	 * prepare for minimal allocation and jump to the low space algorithm
3555 	 * immediately.
3556 	 */
3557 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3558 		args->minlen = ap->minlen;
3559 		ASSERT(args->fsbno == NULLFSBLOCK);
3560 		goto out_low_space;
3561 	}
3562 
3563 	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3564 	if (ap->aeof)
3565 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3566 				true);
3567 
3568 	if (!error && args->fsbno == NULLFSBLOCK)
3569 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3570 
3571 out_low_space:
3572 	/*
3573 	 * We are now done with the perag reference for the filestreams
3574 	 * association provided by xfs_filestream_select_ag(). Release it now as
3575 	 * we've either succeeded, had a fatal error or we are out of space and
3576 	 * need to do a full filesystem scan for free space which will take its
3577 	 * own references.
3578 	 */
3579 	xfs_perag_rele(args->pag);
3580 	args->pag = NULL;
3581 	if (error || args->fsbno != NULLFSBLOCK)
3582 		return error;
3583 
3584 	return xfs_bmap_btalloc_low_space(ap, args);
3585 }
3586 
3587 static int
3588 xfs_bmap_btalloc_best_length(
3589 	struct xfs_bmalloca	*ap,
3590 	struct xfs_alloc_arg	*args,
3591 	int			stripe_align)
3592 {
3593 	xfs_extlen_t		blen = 0;
3594 	int			error;
3595 
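	/*
	 * Start from the inode's location and let xfs_bmap_adjacent() bias the
	 * target block towards neighbouring extents.  If no useful adjacent
	 * block is found, clear ap->eof so we don't attempt an exact EOF
	 * extension below.
	 */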
3596 	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3597 	if (!xfs_bmap_adjacent(ap))
3598 		ap->eof = false;
3599 
3600 	/*
3601 	 * Search for an allocation group with a single extent large enough for
3602 	 * the request.  If one isn't found, then adjust the minimum allocation
3603 	 * size to the largest space found.
3604 	 */
3605 	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3606 	if (error)
3607 		return error;
3608 
3609 	/*
3610 	 * Don't attempt optimal EOF allocation if previous allocations barely
3611 	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3612 	 * optimal or even aligned allocations in this case, so don't waste time
3613 	 * trying.
3614 	 */
3615 	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3616 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3617 				false);
3618 		if (error || args->fsbno != NULLFSBLOCK)
3619 			return error;
3620 	}
3621 
3622 	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3623 	if (error || args->fsbno != NULLFSBLOCK)
3624 		return error;
3625 
3626 	return xfs_bmap_btalloc_low_space(ap, args);
3627 }
3628 
3629 static int
3630 xfs_bmap_btalloc(
3631 	struct xfs_bmalloca	*ap)
3632 {
3633 	struct xfs_mount	*mp = ap->ip->i_mount;
3634 	struct xfs_alloc_arg	args = {
3635 		.tp		= ap->tp,
3636 		.mp		= mp,
3637 		.fsbno		= NULLFSBLOCK,
3638 		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
3639 		.minleft	= ap->minleft,
3640 		.wasdel		= ap->wasdel,
3641 		.resv		= XFS_AG_RESV_NONE,
3642 		.datatype	= ap->datatype,
3643 		.alignment	= 1,
3644 		.minalignslop	= 0,
3645 	};
3646 	xfs_fileoff_t		orig_offset;
3647 	xfs_extlen_t		orig_length;
3648 	int			error;
3649 	int			stripe_align;
3650 
3651 	ASSERT(ap->length);
3652 	orig_offset = ap->offset;
3653 	orig_length = ap->length;
3654 
3655 	stripe_align = xfs_bmap_compute_alignments(ap, &args);
3656 
3657 	/* Trim the allocation back to the maximum an AG can fit. */
3658 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
3659 
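	/*
	 * Pick an allocation strategy: the error injection knob forces the
	 * debug-only minlen allocator, filestream user data goes through the
	 * filestreams AG selection, and everything else uses the best length
	 * search.
	 */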
3660 	if (unlikely(XFS_TEST_ERROR(false, mp,
3661 			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
3662 		error = xfs_bmap_exact_minlen_extent_alloc(ap, &args);
3663 	else if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3664 			xfs_inode_is_filestream(ap->ip))
3665 		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3666 	else
3667 		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3668 	if (error)
3669 		return error;
3670 
3671 	if (args.fsbno != NULLFSBLOCK) {
3672 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3673 			orig_length);
3674 	} else {
3675 		ap->blkno = NULLFSBLOCK;
3676 		ap->length = 0;
3677 	}
3678 	return 0;
3679 }
3680 
3681 /* Trim extent to fit a logical block range. */
3682 void
3683 xfs_trim_extent(
3684 	struct xfs_bmbt_irec	*irec,
3685 	xfs_fileoff_t		bno,
3686 	xfs_filblks_t		len)
3687 {
3688 	xfs_fileoff_t		distance;
3689 	xfs_fileoff_t		end = bno + len;
3690 
3691 	if (irec->br_startoff + irec->br_blockcount <= bno ||
3692 	    irec->br_startoff >= end) {
3693 		irec->br_blockcount = 0;
3694 		return;
3695 	}
3696 
3697 	if (irec->br_startoff < bno) {
3698 		distance = bno - irec->br_startoff;
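		/*
		 * The startblock of a delalloc extent encodes the indirect
		 * block reservation rather than a physical address, so
		 * collapse it to DELAYSTARTBLOCK instead of offsetting it.
		 */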
3699 		if (isnullstartblock(irec->br_startblock))
3700 			irec->br_startblock = DELAYSTARTBLOCK;
3701 		if (irec->br_startblock != DELAYSTARTBLOCK &&
3702 		    irec->br_startblock != HOLESTARTBLOCK)
3703 			irec->br_startblock += distance;
3704 		irec->br_startoff += distance;
3705 		irec->br_blockcount -= distance;
3706 	}
3707 
3708 	if (end < irec->br_startoff + irec->br_blockcount) {
3709 		distance = irec->br_startoff + irec->br_blockcount - end;
3710 		irec->br_blockcount -= distance;
3711 	}
3712 }
3713 
3714 /*
3715  * Trim the returned map to the required bounds
3716  */
3717 STATIC void
3718 xfs_bmapi_trim_map(
3719 	struct xfs_bmbt_irec	*mval,
3720 	struct xfs_bmbt_irec	*got,
3721 	xfs_fileoff_t		*bno,
3722 	xfs_filblks_t		len,
3723 	xfs_fileoff_t		obno,
3724 	xfs_fileoff_t		end,
3725 	int			n,
3726 	uint32_t		flags)
3727 {
3728 	if ((flags & XFS_BMAPI_ENTIRE) ||
3729 	    got->br_startoff + got->br_blockcount <= obno) {
3730 		*mval = *got;
3731 		if (isnullstartblock(got->br_startblock))
3732 			mval->br_startblock = DELAYSTARTBLOCK;
3733 		return;
3734 	}
3735 
3736 	if (obno > *bno)
3737 		*bno = obno;
3738 	ASSERT((*bno >= obno) || (n == 0));
3739 	ASSERT(*bno < end);
3740 	mval->br_startoff = *bno;
3741 	if (isnullstartblock(got->br_startblock))
3742 		mval->br_startblock = DELAYSTARTBLOCK;
3743 	else
3744 		mval->br_startblock = got->br_startblock +
3745 					(*bno - got->br_startoff);
3746 	/*
3747 	 * Return the minimum of what we got and what we asked for, for
3748 	 * the length.  We can use the len variable here because it is
3749 	 * modified below and we could have been there before coming
3750 	 * here if the first part of the allocation didn't overlap what
3751 	 * was asked for.
3752 	 */
3753 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3754 			got->br_blockcount - (*bno - got->br_startoff));
3755 	mval->br_state = got->br_state;
3756 	ASSERT(mval->br_blockcount <= len);
3757 	return;
3758 }
3759 
3760 /*
3761  * Update and validate the extent map to return
3762  */
3763 STATIC void
3764 xfs_bmapi_update_map(
3765 	struct xfs_bmbt_irec	**map,
3766 	xfs_fileoff_t		*bno,
3767 	xfs_filblks_t		*len,
3768 	xfs_fileoff_t		obno,
3769 	xfs_fileoff_t		end,
3770 	int			*n,
3771 	uint32_t		flags)
3772 {
3773 	xfs_bmbt_irec_t	*mval = *map;
3774 
3775 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3776 	       ((mval->br_startoff + mval->br_blockcount) <= end));
3777 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3778 	       (mval->br_startoff < obno));
3779 
3780 	*bno = mval->br_startoff + mval->br_blockcount;
3781 	*len = end - *bno;
3782 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3783 		/* update previous map with new information */
3784 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
3785 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3786 		ASSERT(mval->br_state == mval[-1].br_state);
3787 		mval[-1].br_blockcount = mval->br_blockcount;
3788 		mval[-1].br_state = mval->br_state;
3789 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3790 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
3791 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
3792 		   mval->br_startblock == mval[-1].br_startblock +
3793 					  mval[-1].br_blockcount &&
3794 		   mval[-1].br_state == mval->br_state) {
3795 		ASSERT(mval->br_startoff ==
3796 		       mval[-1].br_startoff + mval[-1].br_blockcount);
3797 		mval[-1].br_blockcount += mval->br_blockcount;
3798 	} else if (*n > 0 &&
3799 		   mval->br_startblock == DELAYSTARTBLOCK &&
3800 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
3801 		   mval->br_startoff ==
3802 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
3803 		mval[-1].br_blockcount += mval->br_blockcount;
3804 		mval[-1].br_state = mval->br_state;
3805 	} else if (!((*n == 0) &&
3806 		     ((mval->br_startoff + mval->br_blockcount) <=
3807 		      obno))) {
3808 		mval++;
3809 		(*n)++;
3810 	}
3811 	*map = mval;
3812 }
3813 
3814 /*
3815  * Map file blocks to filesystem blocks without allocation.
3816  */
3817 int
3818 xfs_bmapi_read(
3819 	struct xfs_inode	*ip,
3820 	xfs_fileoff_t		bno,
3821 	xfs_filblks_t		len,
3822 	struct xfs_bmbt_irec	*mval,
3823 	int			*nmap,
3824 	uint32_t		flags)
3825 {
3826 	struct xfs_mount	*mp = ip->i_mount;
3827 	int			whichfork = xfs_bmapi_whichfork(flags);
3828 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
3829 	struct xfs_bmbt_irec	got;
3830 	xfs_fileoff_t		obno;
3831 	xfs_fileoff_t		end;
3832 	struct xfs_iext_cursor	icur;
3833 	int			error;
3834 	bool			eof = false;
3835 	int			n = 0;
3836 
3837 	ASSERT(*nmap >= 1);
3838 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3839 	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3840 
3841 	if (WARN_ON_ONCE(!ifp)) {
3842 		xfs_bmap_mark_sick(ip, whichfork);
3843 		return -EFSCORRUPTED;
3844 	}
3845 
3846 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3847 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
3848 		xfs_bmap_mark_sick(ip, whichfork);
3849 		return -EFSCORRUPTED;
3850 	}
3851 
3852 	if (xfs_is_shutdown(mp))
3853 		return -EIO;
3854 
3855 	XFS_STATS_INC(mp, xs_blk_mapr);
3856 
3857 	error = xfs_iread_extents(NULL, ip, whichfork);
3858 	if (error)
3859 		return error;
3860 
3861 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3862 		eof = true;
3863 	end = bno + len;
3864 	obno = bno;
3865 
3866 	while (bno < end && n < *nmap) {
3867 		/* Reading past eof, act as though there's a hole up to end. */
3868 		if (eof)
3869 			got.br_startoff = end;
3870 		if (got.br_startoff > bno) {
3871 			/* Reading in a hole.  */
3872 			mval->br_startoff = bno;
3873 			mval->br_startblock = HOLESTARTBLOCK;
3874 			mval->br_blockcount =
3875 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3876 			mval->br_state = XFS_EXT_NORM;
3877 			bno += mval->br_blockcount;
3878 			len -= mval->br_blockcount;
3879 			mval++;
3880 			n++;
3881 			continue;
3882 		}
3883 
3884 		/* set up the extent map to return. */
3885 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3886 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3887 
3888 		/* If we're done, stop now. */
3889 		if (bno >= end || n >= *nmap)
3890 			break;
3891 
3892 		/* Else go on to the next record. */
3893 		if (!xfs_iext_next_extent(ifp, &icur, &got))
3894 			eof = true;
3895 	}
3896 	*nmap = n;
3897 	return 0;
3898 }
3899 
3900 static int
3901 xfs_bmapi_allocate(
3902 	struct xfs_bmalloca	*bma)
3903 {
3904 	struct xfs_mount	*mp = bma->ip->i_mount;
3905 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
3906 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
3907 	int			error;
3908 
3909 	ASSERT(bma->length > 0);
3910 	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
3911 
3912 	if (bma->flags & XFS_BMAPI_CONTIG)
3913 		bma->minlen = bma->length;
3914 	else
3915 		bma->minlen = 1;
3916 
3917 	if (!(bma->flags & XFS_BMAPI_METADATA)) {
3918 		/*
3919 		 * For the data and COW fork, the first data in the file is
3920 		 * treated differently to all other allocations. For the
3921 		 * attribute fork, we only need to ensure the allocated range
3922 		 * is not on the busy list.
3923 		 */
3924 		bma->datatype = XFS_ALLOC_NOBUSY;
3925 		if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
3926 			bma->datatype |= XFS_ALLOC_USERDATA;
3927 			if (bma->offset == 0)
3928 				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
3929 
3930 			if (mp->m_dalign && bma->length >= mp->m_dalign) {
3931 				error = xfs_bmap_isaeof(bma, whichfork);
3932 				if (error)
3933 					return error;
3934 			}
3935 		}
3936 	}
3937 
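	/*
	 * Realtime user data allocations come from the realtime device;
	 * everything else is allocated from the data device via
	 * xfs_bmap_btalloc().
	 */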
3938 	if ((bma->datatype & XFS_ALLOC_USERDATA) &&
3939 	    XFS_IS_REALTIME_INODE(bma->ip))
3940 		error = xfs_bmap_rtalloc(bma);
3941 	else
3942 		error = xfs_bmap_btalloc(bma);
3943 	if (error)
3944 		return error;
3945 	if (bma->blkno == NULLFSBLOCK)
3946 		return -ENOSPC;
3947 
3948 	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
3949 		xfs_bmap_mark_sick(bma->ip, whichfork);
3950 		return -EFSCORRUPTED;
3951 	}
3952 
3953 	if (bma->flags & XFS_BMAPI_ZERO) {
3954 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
3955 		if (error)
3956 			return error;
3957 	}
3958 
3959 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
3960 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
3961 	/*
3962 	 * Bump the number of extents we've allocated
3963 	 * in this call.
3964 	 */
3965 	bma->nallocs++;
3966 
3967 	if (bma->cur && bma->wasdel)
3968 		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
3969 
3970 	bma->got.br_startoff = bma->offset;
3971 	bma->got.br_startblock = bma->blkno;
3972 	bma->got.br_blockcount = bma->length;
3973 	bma->got.br_state = XFS_EXT_NORM;
3974 
3975 	if (bma->flags & XFS_BMAPI_PREALLOC)
3976 		bma->got.br_state = XFS_EXT_UNWRITTEN;
3977 
3978 	if (bma->wasdel)
3979 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
3980 	else
3981 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
3982 				whichfork, &bma->icur, &bma->cur, &bma->got,
3983 				&bma->logflags, bma->flags);
3984 	if (error)
3985 		return error;
3986 
3987 	/*
3988 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
3989 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
3990 	 * the neighbouring ones.
3991 	 */
3992 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
3993 
3994 	ASSERT(bma->got.br_startoff <= bma->offset);
3995 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
3996 	       bma->offset + bma->length);
3997 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
3998 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
3999 	return 0;
4000 }
4001 
4002 STATIC int
4003 xfs_bmapi_convert_unwritten(
4004 	struct xfs_bmalloca	*bma,
4005 	struct xfs_bmbt_irec	*mval,
4006 	xfs_filblks_t		len,
4007 	uint32_t		flags)
4008 {
4009 	int			whichfork = xfs_bmapi_whichfork(flags);
4010 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4011 	int			tmp_logflags = 0;
4012 	int			error;
4013 
4014 	/* check if we need to do unwritten->real conversion */
4015 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
4016 	    (flags & XFS_BMAPI_PREALLOC))
4017 		return 0;
4018 
4019 	/* check if we need to do real->unwritten conversion */
4020 	if (mval->br_state == XFS_EXT_NORM &&
4021 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4022 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4023 		return 0;
4024 
4025 	/*
4026 	 * Modify (by adding) the state flag, if writing.
4027 	 */
4028 	ASSERT(mval->br_blockcount <= len);
4029 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4030 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4031 					bma->ip, whichfork);
4032 	}
4033 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4034 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4035 
4036 	/*
4037 	 * Before insertion into the bmbt, zero the range being converted
4038 	 * if required.
4039 	 */
4040 	if (flags & XFS_BMAPI_ZERO) {
4041 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
4042 					mval->br_blockcount);
4043 		if (error)
4044 			return error;
4045 	}
4046 
4047 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4048 			&bma->icur, &bma->cur, mval, &tmp_logflags);
4049 	/*
4050 	 * Log the inode core unconditionally in the unwritten extent conversion
4051 	 * path because the conversion might not have done so (e.g., if the
4052 	 * extent count hasn't changed). We need to make sure the inode is dirty
4053 	 * in the transaction for the sake of fsync(), even if nothing has
4054 	 * changed, because fsync() will not force the log for this transaction
4055 	 * unless it sees the inode pinned.
4056 	 *
4057 	 * Note: If we're only converting cow fork extents, there aren't
4058 	 * any on-disk updates to make, so we don't need to log anything.
4059 	 */
4060 	if (whichfork != XFS_COW_FORK)
4061 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4062 	if (error)
4063 		return error;
4064 
4065 	/*
4066 	 * Update our extent pointer, given that
4067 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4068 	 * of the neighbouring ones.
4069 	 */
4070 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4071 
4072 	/*
4073 	 * We may have combined previously unwritten space with written space,
4074 	 * so generate another request.
4075 	 */
4076 	if (mval->br_blockcount < len)
4077 		return -EAGAIN;
4078 	return 0;
4079 }
4080 
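/*
 * Work out the minimum number of free blocks that must remain in an AG after
 * an allocation ("minleft") so that a subsequent bmap btree update (an extent
 * list to btree conversion, or a split of every level of the existing tree)
 * can still allocate its blocks.  Once the transaction is already tied to an
 * AG (t_highest_agno is set), no further minleft reservation is applied.
 */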
4081 xfs_extlen_t
4082 xfs_bmapi_minleft(
4083 	struct xfs_trans	*tp,
4084 	struct xfs_inode	*ip,
4085 	int			fork)
4086 {
4087 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);
4088 
4089 	if (tp && tp->t_highest_agno != NULLAGNUMBER)
4090 		return 0;
4091 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4092 		return 1;
4093 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4094 }
4095 
4096 /*
4097  * Log whatever the flags say, even on error.  Otherwise we might miss detecting
4098  * a case where the data is changed, there's an error, and it's not logged so we
4099  * don't shutdown when we should.  Don't bother logging extents/btree changes if
4100  * we converted to the other format.
4101  */
4102 static void
4103 xfs_bmapi_finish(
4104 	struct xfs_bmalloca	*bma,
4105 	int			whichfork,
4106 	int			error)
4107 {
4108 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4109 
4110 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4111 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4112 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4113 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4114 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4115 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4116 
4117 	if (bma->logflags)
4118 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4119 	if (bma->cur)
4120 		xfs_btree_del_cursor(bma->cur, error);
4121 }
4122 
4123 /*
4124  * Map file blocks to filesystem blocks, and allocate blocks or convert the
4125  * extent state if necessary.  Detailed behaviour is controlled by the flags
4126  * parameter.  Only allocates blocks from a single allocation group, to avoid
4127  * locking problems.
4128  *
4129  * Returns 0 on success and places the extent mappings in mval.  nmaps is used
4130  * as an input/output parameter where the caller specifies the maximum number
4131  * of mappings that may be returned and xfs_bmapi_write passes back the number
4132  * of mappings (including existing mappings) it found.
4133  *
4134  * Returns a negative error code on failure, including -ENOSPC when it could not
4135  * allocate any blocks and -ENOSR when it did allocate blocks to convert a
4136  * delalloc range, but those blocks were before the passed in range.
4137  */
4138 int
4139 xfs_bmapi_write(
4140 	struct xfs_trans	*tp,		/* transaction pointer */
4141 	struct xfs_inode	*ip,		/* incore inode */
4142 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
4143 	xfs_filblks_t		len,		/* length to map in file */
4144 	uint32_t		flags,		/* XFS_BMAPI_... */
4145 	xfs_extlen_t		total,		/* total blocks needed */
4146 	struct xfs_bmbt_irec	*mval,		/* output: map values */
4147 	int			*nmap)		/* i/o: mval size/count */
4148 {
4149 	struct xfs_bmalloca	bma = {
4150 		.tp		= tp,
4151 		.ip		= ip,
4152 		.total		= total,
4153 	};
4154 	struct xfs_mount	*mp = ip->i_mount;
4155 	int			whichfork = xfs_bmapi_whichfork(flags);
4156 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4157 	xfs_fileoff_t		end;		/* end of mapped file region */
4158 	bool			eof = false;	/* after the end of extents */
4159 	int			error;		/* error return */
4160 	int			n;		/* current extent index */
4161 	xfs_fileoff_t		obno;		/* old block number (offset) */
4162 
4163 #ifdef DEBUG
4164 	xfs_fileoff_t		orig_bno;	/* original block number value */
4165 	int			orig_flags;	/* original flags arg value */
4166 	xfs_filblks_t		orig_len;	/* original value of len arg */
4167 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
4168 	int			orig_nmap;	/* original value of *nmap */
4169 
4170 	orig_bno = bno;
4171 	orig_len = len;
4172 	orig_flags = flags;
4173 	orig_mval = mval;
4174 	orig_nmap = *nmap;
4175 #endif
4176 
4177 	ASSERT(*nmap >= 1);
4178 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4179 	ASSERT(tp != NULL);
4180 	ASSERT(len > 0);
4181 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4182 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4183 	ASSERT(!(flags & XFS_BMAPI_REMAP));
4184 
4185 	/* zeroing is currently only for data extents, not metadata */
4186 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4187 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4188 	/*
4189 	 * We can allocate unwritten extents or pre-zero allocated blocks,
4190 	 * but it makes no sense to do both at once.  That would result in
4191 	 * zeroing the unwritten extent twice while it still remains an
4192 	 * unwritten extent.
4193 	 */
4194 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4195 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4196 
4197 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4198 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4199 		xfs_bmap_mark_sick(ip, whichfork);
4200 		return -EFSCORRUPTED;
4201 	}
4202 
4203 	if (xfs_is_shutdown(mp))
4204 		return -EIO;
4205 
4206 	XFS_STATS_INC(mp, xs_blk_mapw);
4207 
4208 	error = xfs_iread_extents(tp, ip, whichfork);
4209 	if (error)
4210 		goto error0;
4211 
4212 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4213 		eof = true;
4214 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4215 		bma.prev.br_startoff = NULLFILEOFF;
4216 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4217 
4218 	n = 0;
4219 	end = bno + len;
4220 	obno = bno;
4221 	while (bno < end && n < *nmap) {
4222 		bool			need_alloc = false, wasdelay = false;
4223 
4224 		/* in hole or beyond EOF? */
4225 		if (eof || bma.got.br_startoff > bno) {
4226 			/*
4227 			 * CoW fork conversions should /never/ hit EOF or
4228 			 * holes.  There should always be something for us
4229 			 * to work on.
4230 			 */
4231 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4232 			         (flags & XFS_BMAPI_COWFORK)));
4233 
4234 			need_alloc = true;
4235 		} else if (isnullstartblock(bma.got.br_startblock)) {
4236 			wasdelay = true;
4237 		}
4238 
4239 		/*
4240 		 * First, deal with the hole before the allocated space
4241 		 * that we found, if any.
4242 		 */
4243 		if (need_alloc || wasdelay) {
4244 			bma.eof = eof;
4245 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4246 			bma.wasdel = wasdelay;
4247 			bma.offset = bno;
4248 			bma.flags = flags;
4249 
4250 			/*
4251 			 * There's a 32/64 bit type mismatch between the
4252 			 * allocation length request (which can be 64 bits in
4253 			 * length) and the bma length request, which is
4254 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
4255 			 * be careful and do the min() using the larger type to
4256 			 * avoid overflows.
4257 			 */
4258 			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
4259 
4260 			if (wasdelay) {
4261 				bma.length = XFS_FILBLKS_MIN(bma.length,
4262 					bma.got.br_blockcount -
4263 					(bno - bma.got.br_startoff));
4264 			} else {
4265 				if (!eof)
4266 					bma.length = XFS_FILBLKS_MIN(bma.length,
4267 						bma.got.br_startoff - bno);
4268 			}
4269 
4270 			ASSERT(bma.length > 0);
4271 			error = xfs_bmapi_allocate(&bma);
4272 			if (error) {
4273 				/*
4274 				 * If we already allocated space in a previous
4275 				 * iteration, return what we got so far when
4276 				 * running out of space.
4277 				 */
4278 				if (error == -ENOSPC && bma.nallocs)
4279 					break;
4280 				goto error0;
4281 			}
4282 
4283 			/*
4284 			 * If this is a CoW allocation, record the data in
4285 			 * the refcount btree for orphan recovery.
4286 			 */
4287 			if (whichfork == XFS_COW_FORK)
4288 				xfs_refcount_alloc_cow_extent(tp,
4289 						XFS_IS_REALTIME_INODE(ip),
4290 						bma.blkno, bma.length);
4291 		}
4292 
4293 		/* Deal with the allocated space we found.  */
4294 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4295 							end, n, flags);
4296 
4297 		/* Execute unwritten extent conversion if necessary */
4298 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4299 		if (error == -EAGAIN)
4300 			continue;
4301 		if (error)
4302 			goto error0;
4303 
4304 		/* update the extent map to return */
4305 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4306 
4307 		/*
4308 		 * If we're done, stop now.  Stop when we've allocated
4309 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
4310 		 * the transaction may get too big.
4311 		 */
4312 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4313 			break;
4314 
4315 		/* Else go on to the next record. */
4316 		bma.prev = bma.got;
4317 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4318 			eof = true;
4319 	}
4320 
4321 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4322 			whichfork);
4323 	if (error)
4324 		goto error0;
4325 
4326 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4327 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4328 	xfs_bmapi_finish(&bma, whichfork, 0);
4329 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4330 		orig_nmap, n);
4331 
4332 	/*
4333 	 * When converting delayed allocations, xfs_bmapi_allocate ignores
4334 	 * the passed in bno and always converts from the start of the found
4335 	 * delalloc extent.
4336 	 *
4337 	 * To avoid a successful return with *nmap set to 0, return the magic
4338 	 * -ENOSR error code for this particular case so that the caller can
4339 	 * handle it.
4340 	 */
4341 	if (!n) {
4342 		ASSERT(bma.nallocs >= *nmap);
4343 		return -ENOSR;
4344 	}
4345 	*nmap = n;
4346 	return 0;
4347 error0:
4348 	xfs_bmapi_finish(&bma, whichfork, error);
4349 	return error;
4350 }
4351 
4352 /*
4353  * Convert an existing delalloc extent to real blocks based on file offset. This
4354  * attempts to allocate the entire delalloc extent and may require multiple
4355  * invocations to allocate the target offset if a large enough physical extent
4356  * is not available.
4357  */
4358 static int
4359 xfs_bmapi_convert_one_delalloc(
4360 	struct xfs_inode	*ip,
4361 	int			whichfork,
4362 	xfs_off_t		offset,
4363 	struct iomap		*iomap,
4364 	unsigned int		*seq)
4365 {
4366 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4367 	struct xfs_mount	*mp = ip->i_mount;
4368 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4369 	struct xfs_bmalloca	bma = { NULL };
4370 	uint16_t		flags = 0;
4371 	struct xfs_trans	*tp;
4372 	int			error;
4373 
4374 	if (whichfork == XFS_COW_FORK)
4375 		flags |= IOMAP_F_SHARED;
4376 
4377 	/*
4378 	 * Space for the extent and indirect blocks was reserved when the
4379 	 * delalloc extent was created so there's no need to do so here.
4380 	 */
4381 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4382 				XFS_TRANS_RESERVE, &tp);
4383 	if (error)
4384 		return error;
4385 
4386 	xfs_ilock(ip, XFS_ILOCK_EXCL);
4387 	xfs_trans_ijoin(tp, ip, 0);
4388 
4389 	error = xfs_iext_count_extend(tp, ip, whichfork,
4390 			XFS_IEXT_ADD_NOSPLIT_CNT);
4391 	if (error)
4392 		goto out_trans_cancel;
4393 
4394 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4395 	    bma.got.br_startoff > offset_fsb) {
4396 		/*
4397 		 * No extent found in the range we are trying to convert.  This
4398 		 * should only happen for the COW fork, where another thread
4399 		 * might have moved the extent to the data fork in the meantime.
4400 		 */
4401 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4402 		error = -EAGAIN;
4403 		goto out_trans_cancel;
4404 	}
4405 
4406 	/*
4407 	 * If we find a real extent here we raced with another thread converting
4408 	 * the extent.  Just return the real extent at this offset.
4409 	 */
4410 	if (!isnullstartblock(bma.got.br_startblock)) {
4411 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4412 				xfs_iomap_inode_sequence(ip, flags));
4413 		if (seq)
4414 			*seq = READ_ONCE(ifp->if_seq);
4415 		goto out_trans_cancel;
4416 	}
4417 
4418 	bma.tp = tp;
4419 	bma.ip = ip;
4420 	bma.wasdel = true;
4421 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4422 
4423 	/*
4424 	 * Always convert from the start of the delalloc extent, even if that is
4425 	 * outside the passed in range, so that we create large contiguous
4426 	 * extents on disk.
4427 	 */
4428 	bma.offset = bma.got.br_startoff;
4429 	bma.length = bma.got.br_blockcount;
4430 
4431 	/*
4432 	 * When we're converting the delalloc reservations backing dirty pages
4433 	 * in the page cache, we must be careful about how we create the new
4434 	 * extents:
4435 	 *
4436 	 * New CoW fork extents are created unwritten, turned into real extents
4437 	 * when we're about to write the data to disk, and mapped into the data
4438 	 * fork after the write finishes.  End of story.
4439 	 *
4440 	 * New data fork extents must be mapped in as unwritten and converted
4441 	 * to real extents after the write succeeds to avoid exposing stale
4442 	 * disk contents if we crash.
4443 	 */
4444 	bma.flags = XFS_BMAPI_PREALLOC;
4445 	if (whichfork == XFS_COW_FORK)
4446 		bma.flags |= XFS_BMAPI_COWFORK;
4447 
4448 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4449 		bma.prev.br_startoff = NULLFILEOFF;
4450 
4451 	error = xfs_bmapi_allocate(&bma);
4452 	if (error)
4453 		goto out_finish;
4454 
4455 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4456 	XFS_STATS_INC(mp, xs_xstrat_quick);
4457 
4458 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4459 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4460 				xfs_iomap_inode_sequence(ip, flags));
4461 	if (seq)
4462 		*seq = READ_ONCE(ifp->if_seq);
4463 
4464 	if (whichfork == XFS_COW_FORK)
4465 		xfs_refcount_alloc_cow_extent(tp, XFS_IS_REALTIME_INODE(ip),
4466 				bma.blkno, bma.length);
4467 
4468 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4469 			whichfork);
4470 	if (error)
4471 		goto out_finish;
4472 
4473 	xfs_bmapi_finish(&bma, whichfork, 0);
4474 	error = xfs_trans_commit(tp);
4475 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4476 	return error;
4477 
4478 out_finish:
4479 	xfs_bmapi_finish(&bma, whichfork, error);
4480 out_trans_cancel:
4481 	xfs_trans_cancel(tp);
4482 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4483 	return error;
4484 }
4485 
4486 /*
4487  * Pass in a delalloc extent and convert it to real extents, return the real
4488  * extent that maps offset_fsb in iomap.
4489  */
4490 int
4491 xfs_bmapi_convert_delalloc(
4492 	struct xfs_inode	*ip,
4493 	int			whichfork,
4494 	loff_t			offset,
4495 	struct iomap		*iomap,
4496 	unsigned int		*seq)
4497 {
4498 	int			error;
4499 
4500 	/*
4501 	 * Attempt to allocate whatever delalloc extent currently backs offset
4502 	 * and put the result into iomap.  Allocate in a loop because it may
4503 	 * take several attempts to allocate real blocks for a contiguous
4504 	 * delalloc extent if free space is sufficiently fragmented.
4505 	 */
4506 	do {
4507 		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
4508 					iomap, seq);
4509 		if (error)
4510 			return error;
4511 	} while (iomap->offset + iomap->length <= offset);
4512 
4513 	return 0;
4514 }
4515 
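/*
 * Map an already allocated physical extent (@startblock, @len) into the file
 * at offset @bno without allocating any new blocks.  The target range must be
 * a hole (see the asserts below).
 */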
4516 int
4517 xfs_bmapi_remap(
4518 	struct xfs_trans	*tp,
4519 	struct xfs_inode	*ip,
4520 	xfs_fileoff_t		bno,
4521 	xfs_filblks_t		len,
4522 	xfs_fsblock_t		startblock,
4523 	uint32_t		flags)
4524 {
4525 	struct xfs_mount	*mp = ip->i_mount;
4526 	struct xfs_ifork	*ifp;
4527 	struct xfs_btree_cur	*cur = NULL;
4528 	struct xfs_bmbt_irec	got;
4529 	struct xfs_iext_cursor	icur;
4530 	int			whichfork = xfs_bmapi_whichfork(flags);
4531 	int			logflags = 0, error;
4532 
4533 	ifp = xfs_ifork_ptr(ip, whichfork);
4534 	ASSERT(len > 0);
4535 	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4536 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4537 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4538 			   XFS_BMAPI_NORMAP)));
4539 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4540 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4541 
4542 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4543 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4544 		xfs_bmap_mark_sick(ip, whichfork);
4545 		return -EFSCORRUPTED;
4546 	}
4547 
4548 	if (xfs_is_shutdown(mp))
4549 		return -EIO;
4550 
4551 	error = xfs_iread_extents(tp, ip, whichfork);
4552 	if (error)
4553 		return error;
4554 
4555 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4556 		/* make sure we only reflink into a hole. */
4557 		ASSERT(got.br_startoff > bno);
4558 		ASSERT(got.br_startoff - bno >= len);
4559 	}
4560 
4561 	ip->i_nblocks += len;
4562 	ip->i_delayed_blks -= len; /* see xfs_bmap_defer_add */
4563 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4564 
4565 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
4566 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4567 
4568 	got.br_startoff = bno;
4569 	got.br_startblock = startblock;
4570 	got.br_blockcount = len;
4571 	if (flags & XFS_BMAPI_PREALLOC)
4572 		got.br_state = XFS_EXT_UNWRITTEN;
4573 	else
4574 		got.br_state = XFS_EXT_NORM;
4575 
4576 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4577 			&cur, &got, &logflags, flags);
4578 	if (error)
4579 		goto error0;
4580 
4581 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4582 
4583 error0:
4584 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4585 		logflags &= ~XFS_ILOG_DEXT;
4586 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4587 		logflags &= ~XFS_ILOG_DBROOT;
4588 
4589 	if (logflags)
4590 		xfs_trans_log_inode(tp, ip, logflags);
4591 	if (cur)
4592 		xfs_btree_del_cursor(cur, error);
4593 	return error;
4594 }
4595 
4596 /*
4597  * When a delalloc extent is split (e.g., due to a hole punch), the original
4598  * indlen reservation must be shared across the two new extents that are left
4599  * behind.
4600  *
4601  * Given the original reservation and the worst case indlen for the two new
4602  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4603  * reservation fairly across the two new extents.  Note that this function
4604  * does not steal blocks itself: if @ores cannot cover both worst case
4605  * reservations (e.g. ores == 1), the caller is expected to steal blocks from
4606  * the deleted extent beforehand and to account for them afterwards.
4607  */
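/*
 * For example, with ores = 3, *indlen1 = 4 and *indlen2 = 2: nres = 6 and
 * resfactor = 50, so len1 = 2 and len2 = 1, the full reservation is consumed,
 * and the extents end up with reservations of 2 and 1 blocks respectively.
 */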
4608 static void
4609 xfs_bmap_split_indlen(
4610 	xfs_filblks_t			ores,		/* original res. */
4611 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4612 	xfs_filblks_t			*indlen2)	/* ext2 worst indlen */
4613 {
4614 	xfs_filblks_t			len1 = *indlen1;
4615 	xfs_filblks_t			len2 = *indlen2;
4616 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4617 	xfs_filblks_t			resfactor;
4618 
4619 	/*
4620 	 * We can't meet the total required reservation for the two extents.
4621 	 * Calculate the percent of the overall shortage between both extents
4622 	 * and apply this percentage to each of the requested indlen values.
4623 	 * This distributes the shortage fairly and reduces the chances that one
4624 	 * of the two extents is left with nothing when extents are repeatedly
4625 	 * split.
4626 	 */
4627 	resfactor = (ores * 100);
4628 	do_div(resfactor, nres);
4629 	len1 *= resfactor;
4630 	do_div(len1, 100);
4631 	len2 *= resfactor;
4632 	do_div(len2, 100);
4633 	ASSERT(len1 + len2 <= ores);
4634 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
4635 
4636 	/*
4637 	 * Hand out the remainder to each extent. If one of the two reservations
4638 	 * is zero, we want to make sure that one gets a block first. The loop
4639 	 * below starts with len1, so hand len2 a block right off the bat if it
4640 	 * is zero.
4641 	 */
4642 	ores -= (len1 + len2);
4643 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4644 	if (ores && !len2 && *indlen2) {
4645 		len2++;
4646 		ores--;
4647 	}
4648 	while (ores) {
4649 		if (len1 < *indlen1) {
4650 			len1++;
4651 			ores--;
4652 		}
4653 		if (!ores)
4654 			break;
4655 		if (len2 < *indlen2) {
4656 			len2++;
4657 			ores--;
4658 		}
4659 	}
4660 
4661 	*indlen1 = len1;
4662 	*indlen2 = len2;
4663 }
4664 
4665 void
4666 xfs_bmap_del_extent_delay(
4667 	struct xfs_inode	*ip,
4668 	int			whichfork,
4669 	struct xfs_iext_cursor	*icur,
4670 	struct xfs_bmbt_irec	*got,
4671 	struct xfs_bmbt_irec	*del,
4672 	uint32_t		bflags)	/* bmapi flags */
4673 {
4674 	struct xfs_mount	*mp = ip->i_mount;
4675 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4676 	struct xfs_bmbt_irec	new;
4677 	int64_t			da_old, da_new, da_diff = 0;
4678 	xfs_fileoff_t		del_endoff, got_endoff;
4679 	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
4680 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4681 	uint64_t		fdblocks;
4682 	bool			isrt;
4683 
4684 	XFS_STATS_INC(mp, xs_del_exlist);
4685 
4686 	isrt = xfs_ifork_is_realtime(ip, whichfork);
4687 	del_endoff = del->br_startoff + del->br_blockcount;
4688 	got_endoff = got->br_startoff + got->br_blockcount;
4689 	da_old = startblockval(got->br_startblock);
4690 	da_new = 0;
4691 
4692 	ASSERT(del->br_blockcount > 0);
4693 	ASSERT(got->br_startoff <= del->br_startoff);
4694 	ASSERT(got_endoff >= del_endoff);
4695 
4696 	/*
4697 	 * Update the inode delalloc counter now and wait to update the
4698 	 * sb counters as we might have to borrow some blocks for the
4699 	 * indirect block accounting.
4700 	 */
4701 	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4702 	ip->i_delayed_blks -= del->br_blockcount;
4703 
4704 	if (got->br_startoff == del->br_startoff)
4705 		state |= BMAP_LEFT_FILLING;
4706 	if (got_endoff == del_endoff)
4707 		state |= BMAP_RIGHT_FILLING;
4708 
4709 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4710 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4711 		/*
4712 		 * Matches the whole extent.  Delete the entry.
4713 		 */
4714 		xfs_iext_remove(ip, icur, state);
4715 		xfs_iext_prev(ifp, icur);
4716 		break;
4717 	case BMAP_LEFT_FILLING:
4718 		/*
4719 		 * Deleting the first part of the extent.
4720 		 */
4721 		got->br_startoff = del_endoff;
4722 		got->br_blockcount -= del->br_blockcount;
4723 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4724 				got->br_blockcount), da_old);
4725 		got->br_startblock = nullstartblock((int)da_new);
4726 		xfs_iext_update_extent(ip, state, icur, got);
4727 		break;
4728 	case BMAP_RIGHT_FILLING:
4729 		/*
4730 		 * Deleting the last part of the extent.
4731 		 */
4732 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
4733 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4734 				got->br_blockcount), da_old);
4735 		got->br_startblock = nullstartblock((int)da_new);
4736 		xfs_iext_update_extent(ip, state, icur, got);
4737 		break;
4738 	case 0:
4739 		/*
4740 		 * Deleting the middle of the extent.
4741 		 *
4742 		 * Distribute the original indlen reservation across the two new
4743 		 * extents.  Steal blocks from the deleted extent if necessary.
4744 		 * Stealing blocks simply fudges the fdblocks accounting below.
4745 		 * Warn if either of the new indlen reservations is zero as this
4746 		 * can lead to delalloc problems.
4747 		 */
4748 		got->br_blockcount = del->br_startoff - got->br_startoff;
4749 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4750 
4751 		new.br_blockcount = got_endoff - del_endoff;
4752 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4753 
4754 		WARN_ON_ONCE(!got_indlen || !new_indlen);
4755 		/*
4756 		 * Steal as many blocks as we can to try and satisfy the worst
4757 		 * case indlen for both new extents.
4758 		 *
4759 		 * However, we can't just steal reservations from the data
4760 		 * blocks if this is an RT inode, as the data and metadata
4761 		 * blocks come from different pools.  We'll have to live with an
4762 		 * under-filled indirect reservation in this case.
4763 		 */
4764 		da_new = got_indlen + new_indlen;
4765 		if (da_new > da_old && !isrt) {
4766 			stolen = XFS_FILBLKS_MIN(da_new - da_old,
4767 						 del->br_blockcount);
4768 			da_old += stolen;
4769 		}
4770 		if (da_new > da_old)
4771 			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
4772 		da_new = got_indlen + new_indlen;
4773 
4774 		got->br_startblock = nullstartblock((int)got_indlen);
4775 
4776 		new.br_startoff = del_endoff;
4777 		new.br_state = got->br_state;
4778 		new.br_startblock = nullstartblock((int)new_indlen);
4779 
4780 		xfs_iext_update_extent(ip, state, icur, got);
4781 		xfs_iext_next(ifp, icur);
4782 		xfs_iext_insert(ip, icur, &new, state);
4783 
4784 		del->br_blockcount -= stolen;
4785 		break;
4786 	}
4787 
4788 	ASSERT(da_old >= da_new);
4789 	da_diff = da_old - da_new;
4790 	fdblocks = da_diff;
4791 
4792 	if (bflags & XFS_BMAPI_REMAP) {
4793 		;
4794 	} else if (isrt) {
4795 		xfs_rtbxlen_t	rtxlen;
4796 
4797 		rtxlen = xfs_blen_to_rtbxlen(mp, del->br_blockcount);
4798 		if (xfs_is_zoned_inode(ip))
4799 			xfs_zoned_add_available(mp, rtxlen);
4800 		xfs_add_frextents(mp, rtxlen);
4801 	} else {
4802 		fdblocks += del->br_blockcount;
4803 	}
4804 
4805 	xfs_add_fdblocks(mp, fdblocks);
4806 	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
4807 }
4808 
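/*
 * Remove @del from the real (allocated) COW fork extent @got.  The COW fork
 * exists only in memory, so this just updates the incore extent list and the
 * inode's delayed/CoW block count (i_delayed_blks).
 */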
4809 void
4810 xfs_bmap_del_extent_cow(
4811 	struct xfs_inode	*ip,
4812 	struct xfs_iext_cursor	*icur,
4813 	struct xfs_bmbt_irec	*got,
4814 	struct xfs_bmbt_irec	*del)
4815 {
4816 	struct xfs_mount	*mp = ip->i_mount;
4817 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
4818 	struct xfs_bmbt_irec	new;
4819 	xfs_fileoff_t		del_endoff, got_endoff;
4820 	uint32_t		state = BMAP_COWFORK;
4821 
4822 	XFS_STATS_INC(mp, xs_del_exlist);
4823 
4824 	del_endoff = del->br_startoff + del->br_blockcount;
4825 	got_endoff = got->br_startoff + got->br_blockcount;
4826 
4827 	ASSERT(del->br_blockcount > 0);
4828 	ASSERT(got->br_startoff <= del->br_startoff);
4829 	ASSERT(got_endoff >= del_endoff);
4830 	ASSERT(!isnullstartblock(got->br_startblock));
4831 
4832 	if (got->br_startoff == del->br_startoff)
4833 		state |= BMAP_LEFT_FILLING;
4834 	if (got_endoff == del_endoff)
4835 		state |= BMAP_RIGHT_FILLING;
4836 
4837 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4838 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4839 		/*
4840 		 * Matches the whole extent.  Delete the entry.
4841 		 */
4842 		xfs_iext_remove(ip, icur, state);
4843 		xfs_iext_prev(ifp, icur);
4844 		break;
4845 	case BMAP_LEFT_FILLING:
4846 		/*
4847 		 * Deleting the first part of the extent.
4848 		 */
4849 		got->br_startoff = del_endoff;
4850 		got->br_blockcount -= del->br_blockcount;
4851 		got->br_startblock = del->br_startblock + del->br_blockcount;
4852 		xfs_iext_update_extent(ip, state, icur, got);
4853 		break;
4854 	case BMAP_RIGHT_FILLING:
4855 		/*
4856 		 * Deleting the last part of the extent.
4857 		 */
4858 		got->br_blockcount -= del->br_blockcount;
4859 		xfs_iext_update_extent(ip, state, icur, got);
4860 		break;
4861 	case 0:
4862 		/*
4863 		 * Deleting the middle of the extent.
4864 		 */
4865 		got->br_blockcount = del->br_startoff - got->br_startoff;
4866 
4867 		new.br_startoff = del_endoff;
4868 		new.br_blockcount = got_endoff - del_endoff;
4869 		new.br_state = got->br_state;
4870 		new.br_startblock = del->br_startblock + del->br_blockcount;
4871 
4872 		xfs_iext_update_extent(ip, state, icur, got);
4873 		xfs_iext_next(ifp, icur);
4874 		xfs_iext_insert(ip, icur, &new, state);
4875 		break;
4876 	}
4877 	ip->i_delayed_blks -= del->br_blockcount;
4878 }
4879 
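/*
 * Free a realtime extent directly through the realtime bitmap.  This path is
 * only used on filesystems without rtgroups; with rtgroups enabled, realtime
 * extents are freed via deferred EFIs instead (see xfs_bmap_del_extent_real()).
 */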
4880 static int
4881 xfs_bmap_free_rtblocks(
4882 	struct xfs_trans	*tp,
4883 	struct xfs_bmbt_irec	*del)
4884 {
4885 	struct xfs_rtgroup	*rtg;
4886 	int			error;
4887 
4888 	rtg = xfs_rtgroup_grab(tp->t_mountp, 0);
4889 	if (!rtg)
4890 		return -EIO;
4891 
4892 	/*
4893 	 * Ensure the bitmap and summary inodes are locked and joined to the
4894 	 * transaction before modifying them.
4895 	 */
4896 	if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
4897 		tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
4898 		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
4899 		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
4900 	}
4901 
4902 	error = xfs_rtfree_blocks(tp, rtg, del->br_startblock,
4903 			del->br_blockcount);
4904 	xfs_rtgroup_rele(rtg);
4905 	return error;
4906 }
4907 
4908 /*
4909  * Called by xfs_bmapi to update file extent records and the btree
4910  * after removing space.
4911  */
4912 STATIC int				/* error */
4913 xfs_bmap_del_extent_real(
4914 	xfs_inode_t		*ip,	/* incore inode pointer */
4915 	xfs_trans_t		*tp,	/* current transaction pointer */
4916 	struct xfs_iext_cursor	*icur,
4917 	struct xfs_btree_cur	*cur,	/* if null, not a btree */
4918 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
4919 	int			*logflagsp, /* inode logging flags */
4920 	int			whichfork, /* data or attr fork */
4921 	uint32_t		bflags)	/* bmapi flags */
4922 {
4923 	xfs_fsblock_t		del_endblock=0;	/* first block past del */
4924 	xfs_fileoff_t		del_endoff;	/* first offset past del */
4925 	int			error = 0;	/* error return value */
4926 	struct xfs_bmbt_irec	got;	/* current extent entry */
4927 	xfs_fileoff_t		got_endoff;	/* first offset past got */
4928 	int			i;	/* temp state */
4929 	struct xfs_ifork	*ifp;	/* inode fork pointer */
4930 	xfs_mount_t		*mp;	/* mount structure */
4931 	xfs_filblks_t		nblks;	/* quota/sb block count */
4932 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
4933 	/* REFERENCED */
4934 	uint			qfield;	/* quota field to update */
4935 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4936 	struct xfs_bmbt_irec	old;
4937 
4938 	*logflagsp = 0;
4939 
4940 	mp = ip->i_mount;
4941 	XFS_STATS_INC(mp, xs_del_exlist);
4942 
4943 	ifp = xfs_ifork_ptr(ip, whichfork);
4944 	ASSERT(del->br_blockcount > 0);
4945 	xfs_iext_get_extent(ifp, icur, &got);
4946 	ASSERT(got.br_startoff <= del->br_startoff);
4947 	del_endoff = del->br_startoff + del->br_blockcount;
4948 	got_endoff = got.br_startoff + got.br_blockcount;
4949 	ASSERT(got_endoff >= del_endoff);
4950 	ASSERT(!isnullstartblock(got.br_startblock));
4951 	qfield = 0;
4952 
4953 	/*
4954 	 * If it's the case where the directory code is running with no block
4955 	 * reservation, and the deleted block is in the middle of its extent,
4956 	 * and the resulting insert of an extent would cause transformation to
4957 	 * btree format, then reject it.  The calling code will then swap blocks
4958 	 * around instead.  We have to do this now, rather than waiting for the
4959 	 * conversion to btree format, since the transaction will be dirty then.
4960 	 */
4961 	if (tp->t_blk_res == 0 &&
4962 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
4963 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
4964 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4965 		return -ENOSPC;
4966 
4967 	*logflagsp = XFS_ILOG_CORE;
4968 	if (xfs_ifork_is_realtime(ip, whichfork))
4969 		qfield = XFS_TRANS_DQ_RTBCOUNT;
4970 	else
4971 		qfield = XFS_TRANS_DQ_BCOUNT;
4972 	nblks = del->br_blockcount;
4973 
4974 	del_endblock = del->br_startblock + del->br_blockcount;
4975 	if (cur) {
4976 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
4977 		if (error)
4978 			return error;
4979 		if (XFS_IS_CORRUPT(mp, i != 1)) {
4980 			xfs_btree_mark_sick(cur);
4981 			return -EFSCORRUPTED;
4982 		}
4983 	}
4984 
4985 	if (got.br_startoff == del->br_startoff)
4986 		state |= BMAP_LEFT_FILLING;
4987 	if (got_endoff == del_endoff)
4988 		state |= BMAP_RIGHT_FILLING;
4989 
4990 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4991 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4992 		/*
4993 		 * Matches the whole extent.  Delete the entry.
4994 		 */
4995 		xfs_iext_remove(ip, icur, state);
4996 		xfs_iext_prev(ifp, icur);
4997 		ifp->if_nextents--;
4998 
4999 		*logflagsp |= XFS_ILOG_CORE;
5000 		if (!cur) {
5001 			*logflagsp |= xfs_ilog_fext(whichfork);
5002 			break;
5003 		}
5004 		if ((error = xfs_btree_delete(cur, &i)))
5005 			return error;
5006 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5007 			xfs_btree_mark_sick(cur);
5008 			return -EFSCORRUPTED;
5009 		}
5010 		break;
5011 	case BMAP_LEFT_FILLING:
5012 		/*
5013 		 * Deleting the first part of the extent.
5014 		 */
5015 		got.br_startoff = del_endoff;
5016 		got.br_startblock = del_endblock;
5017 		got.br_blockcount -= del->br_blockcount;
5018 		xfs_iext_update_extent(ip, state, icur, &got);
5019 		if (!cur) {
5020 			*logflagsp |= xfs_ilog_fext(whichfork);
5021 			break;
5022 		}
5023 		error = xfs_bmbt_update(cur, &got);
5024 		if (error)
5025 			return error;
5026 		break;
5027 	case BMAP_RIGHT_FILLING:
5028 		/*
5029 		 * Deleting the last part of the extent.
5030 		 */
5031 		got.br_blockcount -= del->br_blockcount;
5032 		xfs_iext_update_extent(ip, state, icur, &got);
5033 		if (!cur) {
5034 			*logflagsp |= xfs_ilog_fext(whichfork);
5035 			break;
5036 		}
5037 		error = xfs_bmbt_update(cur, &got);
5038 		if (error)
5039 			return error;
5040 		break;
5041 	case 0:
5042 		/*
5043 		 * Deleting the middle of the extent.
5044 		 */
5045 
5046 		old = got;
5047 
5048 		got.br_blockcount = del->br_startoff - got.br_startoff;
5049 		xfs_iext_update_extent(ip, state, icur, &got);
5050 
5051 		new.br_startoff = del_endoff;
5052 		new.br_blockcount = got_endoff - del_endoff;
5053 		new.br_state = got.br_state;
5054 		new.br_startblock = del_endblock;
5055 
5056 		*logflagsp |= XFS_ILOG_CORE;
5057 		if (cur) {
5058 			error = xfs_bmbt_update(cur, &got);
5059 			if (error)
5060 				return error;
5061 			error = xfs_btree_increment(cur, 0, &i);
5062 			if (error)
5063 				return error;
5064 			cur->bc_rec.b = new;
5065 			error = xfs_btree_insert(cur, &i);
5066 			if (error && error != -ENOSPC)
5067 				return error;
5068 			/*
5069 			 * If we get no-space back from the btree insert, it tried
5070 			 * a split and we have a zero block reservation.  Fix up
5071 			 * our state and return the error.
5072 			 */
5073 			if (error == -ENOSPC) {
5074 				/*
5075 				 * Reset the cursor, don't trust it after any
5076 				 * insert operation.
5077 				 */
5078 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5079 				if (error)
5080 					return error;
5081 				if (XFS_IS_CORRUPT(mp, i != 1)) {
5082 					xfs_btree_mark_sick(cur);
5083 					return -EFSCORRUPTED;
5084 				}
5085 				/*
5086 				 * Update the btree record back
5087 				 * to the original value.
5088 				 */
5089 				error = xfs_bmbt_update(cur, &old);
5090 				if (error)
5091 					return error;
5092 				/*
5093 				 * Reset the extent record back
5094 				 * to the original value.
5095 				 */
5096 				xfs_iext_update_extent(ip, state, icur, &old);
5097 				*logflagsp = 0;
5098 				return -ENOSPC;
5099 			}
5100 			if (XFS_IS_CORRUPT(mp, i != 1)) {
5101 				xfs_btree_mark_sick(cur);
5102 				return -EFSCORRUPTED;
5103 			}
5104 		} else
5105 			*logflagsp |= xfs_ilog_fext(whichfork);
5106 
5107 		ifp->if_nextents++;
5108 		xfs_iext_next(ifp, icur);
5109 		xfs_iext_insert(ip, icur, &new, state);
5110 		break;
5111 	}
5112 
5113 	/* remove reverse mapping */
5114 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5115 
5116 	/*
5117 	 * If we need to, add to list of extents to delete.
5118 	 */
5119 	if (!(bflags & XFS_BMAPI_REMAP)) {
5120 		bool	isrt = xfs_ifork_is_realtime(ip, whichfork);
5121 
5122 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5123 			xfs_refcount_decrease_extent(tp, isrt, del);
5124 		} else if (isrt && !xfs_has_rtgroups(mp)) {
5125 			error = xfs_bmap_free_rtblocks(tp, del);
5126 		} else {
5127 			unsigned int	efi_flags = 0;
5128 
5129 			if ((bflags & XFS_BMAPI_NODISCARD) ||
5130 			    del->br_state == XFS_EXT_UNWRITTEN)
5131 				efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;
5132 
5133 			/*
5134 			 * Historically, we did not use EFIs to free realtime
5135 			 * extents.  However, when reverse mapping is enabled,
5136 			 * we must maintain the same order of operations as the
5137 			 * data device, which is: Remove the file mapping,
5138 			 * remove the reverse mapping, and then free the
5139 			 * blocks.  Reflink for realtime volumes requires the
5140 			 * same sort of ordering.  Both features rely on
5141 			 * rtgroups, so let's gate rt EFI usage on rtgroups.
5142 			 */
5143 			if (isrt)
5144 				efi_flags |= XFS_FREE_EXTENT_REALTIME;
5145 
5146 			error = xfs_free_extent_later(tp, del->br_startblock,
5147 					del->br_blockcount, NULL,
5148 					XFS_AG_RESV_NONE, efi_flags);
5149 		}
5150 		if (error)
5151 			return error;
5152 	}
5153 
5154 	/*
5155 	 * Adjust inode # blocks in the file.
5156 	 */
5157 	if (nblks)
5158 		ip->i_nblocks -= nblks;
5159 	/*
5160 	 * Adjust quota data.
5161 	 */
5162 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
5163 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5164 
5165 	return 0;
5166 }
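
/*
 * Editor-added illustrative sketch, not part of the original file: the four
 * cases handled by the switch statement in xfs_bmap_del_extent_real() above
 * fall out of two comparisons between the deleted range and the extent that
 * contains it.  The helper below is a hypothetical restatement of that
 * classification using the same state flags; it is not called anywhere.
 */
static inline unsigned int
xfs_bmap_del_case_sketch(
	const struct xfs_bmbt_irec	*got,	/* containing extent */
	const struct xfs_bmbt_irec	*del)	/* range being deleted */
{
	unsigned int			state = 0;

	if (del->br_startoff == got->br_startoff)
		state |= BMAP_LEFT_FILLING;	/* deletion reaches the front */
	if (del->br_startoff + del->br_blockcount ==
	    got->br_startoff + got->br_blockcount)
		state |= BMAP_RIGHT_FILLING;	/* deletion reaches the end */

	/*
	 * Both flags set: the whole extent goes away.  Exactly one flag set:
	 * one end of the extent is trimmed.  Neither flag set: a hole is
	 * punched in the middle, which requires inserting a second record.
	 */
	return state;
}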
5167 
5168 /*
5169  * Unmap (remove) blocks from a file.
5170  * If nexts is nonzero then the number of extents to remove is limited to
5171  * that value.  If not all extents in the block range can be removed then
5172  * *rlen is set to the length of the range that remains unmapped.
5173  */
5174 static int
5175 __xfs_bunmapi(
5176 	struct xfs_trans	*tp,		/* transaction pointer */
5177 	struct xfs_inode	*ip,		/* incore inode */
5178 	xfs_fileoff_t		start,		/* first file offset deleted */
5179 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5180 	uint32_t		flags,		/* misc flags */
5181 	xfs_extnum_t		nexts)		/* number of extents max */
5182 {
5183 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5184 	struct xfs_bmbt_irec	del;		/* extent being deleted */
5185 	int			error;		/* error return value */
5186 	xfs_extnum_t		extno;		/* extent number in list */
5187 	struct xfs_bmbt_irec	got;		/* current extent record */
5188 	struct xfs_ifork	*ifp;		/* inode fork pointer */
5189 	int			isrt;		/* freeing in rt area */
5190 	int			logflags;	/* transaction logging flags */
5191 	xfs_extlen_t		mod;		/* rt extent offset */
5192 	struct xfs_mount	*mp = ip->i_mount;
5193 	int			tmp_logflags;	/* partial logging flags */
5194 	int			wasdel;		/* was a delayed alloc extent */
5195 	int			whichfork;	/* data or attribute fork */
5196 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
5197 	xfs_fileoff_t		end;
5198 	struct xfs_iext_cursor	icur;
5199 	bool			done = false;
5200 
5201 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5202 
5203 	whichfork = xfs_bmapi_whichfork(flags);
5204 	ASSERT(whichfork != XFS_COW_FORK);
5205 	ifp = xfs_ifork_ptr(ip, whichfork);
5206 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
5207 		xfs_bmap_mark_sick(ip, whichfork);
5208 		return -EFSCORRUPTED;
5209 	}
5210 	if (xfs_is_shutdown(mp))
5211 		return -EIO;
5212 
5213 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5214 	ASSERT(len > 0);
5215 	ASSERT(nexts >= 0);
5216 
5217 	error = xfs_iread_extents(tp, ip, whichfork);
5218 	if (error)
5219 		return error;
5220 
5221 	if (xfs_iext_count(ifp) == 0) {
5222 		*rlen = 0;
5223 		return 0;
5224 	}
5225 	XFS_STATS_INC(mp, xs_blk_unmap);
5226 	isrt = xfs_ifork_is_realtime(ip, whichfork);
5227 	end = start + len;
5228 
5229 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5230 		*rlen = 0;
5231 		return 0;
5232 	}
5233 	end--;
5234 
5235 	logflags = 0;
5236 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5237 		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
5238 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5239 	} else
5240 		cur = NULL;
5241 
5242 	extno = 0;
5243 	while (end != (xfs_fileoff_t)-1 && end >= start &&
5244 	       (nexts == 0 || extno < nexts)) {
5245 		/*
5246 		 * Is the found extent after a hole in which end lives?
5247 		 * Just back up to the previous extent, if so.
5248 		 */
5249 		if (got.br_startoff > end &&
5250 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5251 			done = true;
5252 			break;
5253 		}
5254 		/*
5255 		 * Is the last block of this extent before the range
5256 		 * we're supposed to delete?  If so, we're done.
5257 		 */
5258 		end = XFS_FILEOFF_MIN(end,
5259 			got.br_startoff + got.br_blockcount - 1);
5260 		if (end < start)
5261 			break;
5262 		/*
5263 		 * Then deal with the (possibly delayed) allocated space
5264 		 * we found.
5265 		 */
5266 		del = got;
5267 		wasdel = isnullstartblock(del.br_startblock);
5268 
5269 		if (got.br_startoff < start) {
5270 			del.br_startoff = start;
5271 			del.br_blockcount -= start - got.br_startoff;
5272 			if (!wasdel)
5273 				del.br_startblock += start - got.br_startoff;
5274 		}
5275 		if (del.br_startoff + del.br_blockcount > end + 1)
5276 			del.br_blockcount = end + 1 - del.br_startoff;
5277 
5278 		if (!isrt || (flags & XFS_BMAPI_REMAP))
5279 			goto delete;
5280 
5281 		mod = xfs_rtb_to_rtxoff(mp,
5282 				del.br_startblock + del.br_blockcount);
5283 		if (mod) {
5284 			/*
5285 			 * Realtime extent not lined up at the end.
5286 			 * The extent could have been split into written
5287 			 * and unwritten pieces, or we could just be
5288 			 * unmapping part of it.  But we can't really
5289 			 * get rid of part of a realtime extent.
5290 			 */
5291 			if (del.br_state == XFS_EXT_UNWRITTEN) {
5292 				/*
5293 				 * This piece is unwritten, or we're not
5294 				 * using unwritten extents.  Skip over it.
5295 				 */
5296 				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
5297 				end -= mod > del.br_blockcount ?
5298 					del.br_blockcount : mod;
5299 				if (end < got.br_startoff &&
5300 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5301 					done = true;
5302 					break;
5303 				}
5304 				continue;
5305 			}
5306 			/*
5307 			 * It's written, turn it unwritten.
5308 			 * This is better than zeroing it.
5309 			 */
5310 			ASSERT(del.br_state == XFS_EXT_NORM);
5311 			ASSERT(tp->t_blk_res > 0);
5312 			/*
5313 			 * If this spans a realtime extent boundary,
5314 			 * chop it back to the start of the one we end at.
5315 			 */
5316 			if (del.br_blockcount > mod) {
5317 				del.br_startoff += del.br_blockcount - mod;
5318 				del.br_startblock += del.br_blockcount - mod;
5319 				del.br_blockcount = mod;
5320 			}
5321 			del.br_state = XFS_EXT_UNWRITTEN;
5322 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5323 					whichfork, &icur, &cur, &del,
5324 					&logflags);
5325 			if (error)
5326 				goto error0;
5327 			goto nodelete;
5328 		}
5329 
5330 		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
5331 		if (mod) {
5332 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5333 
5334 			/*
5335 			 * Realtime extent is lined up at the end but not
5336 			 * at the front.  We'll get rid of full extents if
5337 			 * we can.
5338 			 */
5339 			if (del.br_blockcount > off) {
5340 				del.br_blockcount -= off;
5341 				del.br_startoff += off;
5342 				del.br_startblock += off;
5343 			} else if (del.br_startoff == start &&
5344 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5345 				    tp->t_blk_res == 0)) {
5346 				/*
5347 				 * Can't make it unwritten.  There isn't
5348 				 * a full extent here so just skip it.
5349 				 */
5350 				ASSERT(end >= del.br_blockcount);
5351 				end -= del.br_blockcount;
5352 				if (got.br_startoff > end &&
5353 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5354 					done = true;
5355 					break;
5356 				}
5357 				continue;
5358 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
5359 				struct xfs_bmbt_irec	prev;
5360 				xfs_fileoff_t		unwrite_start;
5361 
5362 				/*
5363 				 * This one is already unwritten.
5364 				 * It must have a written left neighbor.
5365 				 * Unwrite the killed part of that one and
5366 				 * try again.
5367 				 */
5368 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5369 					ASSERT(0);
5370 				ASSERT(prev.br_state == XFS_EXT_NORM);
5371 				ASSERT(!isnullstartblock(prev.br_startblock));
5372 				ASSERT(del.br_startblock ==
5373 				       prev.br_startblock + prev.br_blockcount);
5374 				unwrite_start = max3(start,
5375 						     del.br_startoff - mod,
5376 						     prev.br_startoff);
5377 				mod = unwrite_start - prev.br_startoff;
5378 				prev.br_startoff = unwrite_start;
5379 				prev.br_startblock += mod;
5380 				prev.br_blockcount -= mod;
5381 				prev.br_state = XFS_EXT_UNWRITTEN;
5382 				error = xfs_bmap_add_extent_unwritten_real(tp,
5383 						ip, whichfork, &icur, &cur,
5384 						&prev, &logflags);
5385 				if (error)
5386 					goto error0;
5387 				goto nodelete;
5388 			} else {
5389 				ASSERT(del.br_state == XFS_EXT_NORM);
5390 				del.br_state = XFS_EXT_UNWRITTEN;
5391 				error = xfs_bmap_add_extent_unwritten_real(tp,
5392 						ip, whichfork, &icur, &cur,
5393 						&del, &logflags);
5394 				if (error)
5395 					goto error0;
5396 				goto nodelete;
5397 			}
5398 		}
5399 
5400 delete:
5401 		if (wasdel) {
5402 			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
5403 					&del, flags);
5404 		} else {
5405 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5406 					&del, &tmp_logflags, whichfork,
5407 					flags);
5408 			logflags |= tmp_logflags;
5409 			if (error)
5410 				goto error0;
5411 		}
5412 
5413 		end = del.br_startoff - 1;
5414 nodelete:
5415 		/*
5416 		 * If not done, go on to the next (previous) record.
5417 		 */
5418 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5419 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5420 			    (got.br_startoff > end &&
5421 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5422 				done = true;
5423 				break;
5424 			}
5425 			extno++;
5426 		}
5427 	}
5428 	if (done || end == (xfs_fileoff_t)-1 || end < start)
5429 		*rlen = 0;
5430 	else
5431 		*rlen = end - start + 1;
5432 
5433 	/*
5434 	 * Convert to a btree if necessary.
5435 	 */
5436 	if (xfs_bmap_needs_btree(ip, whichfork)) {
5437 		ASSERT(cur == NULL);
5438 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5439 				&tmp_logflags, whichfork);
5440 		logflags |= tmp_logflags;
5441 	} else {
5442 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5443 			whichfork);
5444 	}
5445 
5446 error0:
5447 	/*
5448 	 * Log everything.  Do this after conversion; there's no point in
5449 	 * logging the extent records if we've converted to btree format.
5450 	 */
5451 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5452 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5453 		logflags &= ~xfs_ilog_fext(whichfork);
5454 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5455 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
5456 		logflags &= ~xfs_ilog_fbroot(whichfork);
5457 	/*
5458 	 * Log the inode even in the error case; if the transaction
5459 	 * is dirty we'll need to shut down the filesystem.
5460 	 */
5461 	if (logflags)
5462 		xfs_trans_log_inode(tp, ip, logflags);
5463 	if (cur) {
5464 		if (!error)
5465 			cur->bc_bmap.allocated = 0;
5466 		xfs_btree_del_cursor(cur, error);
5467 	}
5468 	return error;
5469 }
5470 
5471 /* Unmap a range of a file. */
5472 int
5473 xfs_bunmapi(
5474 	xfs_trans_t		*tp,
5475 	struct xfs_inode	*ip,
5476 	xfs_fileoff_t		bno,
5477 	xfs_filblks_t		len,
5478 	uint32_t		flags,
5479 	xfs_extnum_t		nexts,
5480 	int			*done)
5481 {
5482 	int			error;
5483 
5484 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5485 	*done = (len == 0);
5486 	return error;
5487 }
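
/*
 * Editor-added hypothetical caller sketch: xfs_bunmapi() reports through
 * *done whether everything in the range was unmapped within the extent
 * budget, so a caller that wants the whole range gone keeps calling until
 * *done is set.  Real callers also finish deferred work and roll the
 * transaction between passes; see xfs_bunmapi_range() later in this file
 * for the in-tree loop built directly on __xfs_bunmapi().
 */
static inline int
xfs_bunmapi_all_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len)
{
	int			done = 0;
	int			error = 0;

	while (!done && !error)
		error = xfs_bunmapi(tp, ip, bno, len, 0, 2, &done);
	return error;
}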
5488 
5489 /*
5490  * Determine whether an extent shift can be accomplished by a merge with the
5491  * extent that precedes the target hole of the shift.
5492  */
5493 STATIC bool
5494 xfs_bmse_can_merge(
5495 	struct xfs_inode	*ip,
5496 	int			whichfork,
5497 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5498 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5499 	xfs_fileoff_t		shift)	/* shift fsb */
5500 {
5501 	xfs_fileoff_t		startoff;
5502 
5503 	startoff = got->br_startoff - shift;
5504 
5505 	/*
5506 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5507 	 * the preceding extent.
5508 	 */
5509 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5510 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5511 	    (left->br_state != got->br_state) ||
5512 	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN) ||
5513 	    !xfs_bmap_same_rtgroup(ip, whichfork, left, got))
5514 		return false;
5515 
5516 	return true;
5517 }
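
/*
 * Editor-added worked example with made-up values: left maps file blocks
 * [10, 14] to disk blocks [100, 104] and got maps file blocks [20, 24] to
 * disk blocks [105, 109].  Shifting got left by 5 would place it at file
 * block 15, contiguous with left both in the file and on disk, so
 * xfs_bmse_can_merge() allows the shift to be performed as a merge.  This
 * helper is hypothetical and not called anywhere.
 */
static inline bool
xfs_bmse_merge_example(
	struct xfs_inode	*ip)
{
	struct xfs_bmbt_irec	left = {
		.br_startoff	= 10,
		.br_startblock	= 100,
		.br_blockcount	= 5,
		.br_state	= XFS_EXT_NORM,
	};
	struct xfs_bmbt_irec	got = {
		.br_startoff	= 20,
		.br_startblock	= 105,
		.br_blockcount	= 5,
		.br_state	= XFS_EXT_NORM,
	};

	/* expected to be true for an ordinary (non-realtime) data fork */
	return xfs_bmse_can_merge(ip, XFS_DATA_FORK, &left, &got, 5);
}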
5518 
5519 /*
5520  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5521  * hole in the file. If an extent shift would result in the extent being fully
5522  * adjacent to the extent that currently precedes the hole, we can merge with
5523  * the preceding extent rather than do the shift.
5524  *
5525  * This function assumes the caller has verified a shift-by-merge is possible
5526  * with the provided extents via xfs_bmse_can_merge().
5527  */
5528 STATIC int
5529 xfs_bmse_merge(
5530 	struct xfs_trans		*tp,
5531 	struct xfs_inode		*ip,
5532 	int				whichfork,
5533 	xfs_fileoff_t			shift,		/* shift fsb */
5534 	struct xfs_iext_cursor		*icur,
5535 	struct xfs_bmbt_irec		*got,		/* extent to shift */
5536 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5537 	struct xfs_btree_cur		*cur,
5538 	int				*logflags)	/* output */
5539 {
5540 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
5541 	struct xfs_bmbt_irec		new;
5542 	xfs_filblks_t			blockcount;
5543 	int				error, i;
5544 	struct xfs_mount		*mp = ip->i_mount;
5545 
5546 	blockcount = left->br_blockcount + got->br_blockcount;
5547 
5548 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5549 	ASSERT(xfs_bmse_can_merge(ip, whichfork, left, got, shift));
5550 
5551 	new = *left;
5552 	new.br_blockcount = blockcount;
5553 
5554 	/*
5555 	 * Update the on-disk extent count, the btree if necessary and log the
5556 	 * inode.
5557 	 */
5558 	ifp->if_nextents--;
5559 	*logflags |= XFS_ILOG_CORE;
5560 	if (!cur) {
5561 		*logflags |= XFS_ILOG_DEXT;
5562 		goto done;
5563 	}
5564 
5565 	/* lookup and remove the extent to merge */
5566 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5567 	if (error)
5568 		return error;
5569 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5570 		xfs_btree_mark_sick(cur);
5571 		return -EFSCORRUPTED;
5572 	}
5573 
5574 	error = xfs_btree_delete(cur, &i);
5575 	if (error)
5576 		return error;
5577 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5578 		xfs_btree_mark_sick(cur);
5579 		return -EFSCORRUPTED;
5580 	}
5581 
5582 	/* lookup and update size of the previous extent */
5583 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5584 	if (error)
5585 		return error;
5586 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5587 		xfs_btree_mark_sick(cur);
5588 		return -EFSCORRUPTED;
5589 	}
5590 
5591 	error = xfs_bmbt_update(cur, &new);
5592 	if (error)
5593 		return error;
5594 
5595 	/* change to extent format if required after extent removal */
5596 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5597 	if (error)
5598 		return error;
5599 
5600 done:
5601 	xfs_iext_remove(ip, icur, 0);
5602 	xfs_iext_prev(ifp, icur);
5603 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5604 			&new);
5605 
5606 	/* update reverse mapping. rmap functions merge the rmaps for us */
5607 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5608 	memcpy(&new, got, sizeof(new));
5609 	new.br_startoff = left->br_startoff + left->br_blockcount;
5610 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5611 	return 0;
5612 }
5613 
5614 static int
5615 xfs_bmap_shift_update_extent(
5616 	struct xfs_trans	*tp,
5617 	struct xfs_inode	*ip,
5618 	int			whichfork,
5619 	struct xfs_iext_cursor	*icur,
5620 	struct xfs_bmbt_irec	*got,
5621 	struct xfs_btree_cur	*cur,
5622 	int			*logflags,
5623 	xfs_fileoff_t		startoff)
5624 {
5625 	struct xfs_mount	*mp = ip->i_mount;
5626 	struct xfs_bmbt_irec	prev = *got;
5627 	int			error, i;
5628 
5629 	*logflags |= XFS_ILOG_CORE;
5630 
5631 	got->br_startoff = startoff;
5632 
5633 	if (cur) {
5634 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5635 		if (error)
5636 			return error;
5637 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5638 			xfs_btree_mark_sick(cur);
5639 			return -EFSCORRUPTED;
5640 		}
5641 
5642 		error = xfs_bmbt_update(cur, got);
5643 		if (error)
5644 			return error;
5645 	} else {
5646 		*logflags |= XFS_ILOG_DEXT;
5647 	}
5648 
5649 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5650 			got);
5651 
5652 	/* update reverse mapping */
5653 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5654 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5655 	return 0;
5656 }
5657 
5658 int
5659 xfs_bmap_collapse_extents(
5660 	struct xfs_trans	*tp,
5661 	struct xfs_inode	*ip,
5662 	xfs_fileoff_t		*next_fsb,
5663 	xfs_fileoff_t		offset_shift_fsb,
5664 	bool			*done)
5665 {
5666 	int			whichfork = XFS_DATA_FORK;
5667 	struct xfs_mount	*mp = ip->i_mount;
5668 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5669 	struct xfs_btree_cur	*cur = NULL;
5670 	struct xfs_bmbt_irec	got, prev;
5671 	struct xfs_iext_cursor	icur;
5672 	xfs_fileoff_t		new_startoff;
5673 	int			error = 0;
5674 	int			logflags = 0;
5675 
5676 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5677 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5678 		xfs_bmap_mark_sick(ip, whichfork);
5679 		return -EFSCORRUPTED;
5680 	}
5681 
5682 	if (xfs_is_shutdown(mp))
5683 		return -EIO;
5684 
5685 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5686 
5687 	error = xfs_iread_extents(tp, ip, whichfork);
5688 	if (error)
5689 		return error;
5690 
5691 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5692 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5693 
5694 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5695 		*done = true;
5696 		goto del_cursor;
5697 	}
5698 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5699 		xfs_bmap_mark_sick(ip, whichfork);
5700 		error = -EFSCORRUPTED;
5701 		goto del_cursor;
5702 	}
5703 
5704 	new_startoff = got.br_startoff - offset_shift_fsb;
5705 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5706 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5707 			error = -EINVAL;
5708 			goto del_cursor;
5709 		}
5710 
5711 		if (xfs_bmse_can_merge(ip, whichfork, &prev, &got,
5712 				offset_shift_fsb)) {
5713 			error = xfs_bmse_merge(tp, ip, whichfork,
5714 					offset_shift_fsb, &icur, &got, &prev,
5715 					cur, &logflags);
5716 			if (error)
5717 				goto del_cursor;
5718 			goto done;
5719 		}
5720 	} else {
5721 		if (got.br_startoff < offset_shift_fsb) {
5722 			error = -EINVAL;
5723 			goto del_cursor;
5724 		}
5725 	}
5726 
5727 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5728 			cur, &logflags, new_startoff);
5729 	if (error)
5730 		goto del_cursor;
5731 
5732 done:
5733 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5734 		*done = true;
5735 		goto del_cursor;
5736 	}
5737 
5738 	*next_fsb = got.br_startoff;
5739 del_cursor:
5740 	if (cur)
5741 		xfs_btree_del_cursor(cur, error);
5742 	if (logflags)
5743 		xfs_trans_log_inode(tp, ip, logflags);
5744 	return error;
5745 }
5746 
5747 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5748 int
5749 xfs_bmap_can_insert_extents(
5750 	struct xfs_inode	*ip,
5751 	xfs_fileoff_t		off,
5752 	xfs_fileoff_t		shift)
5753 {
5754 	struct xfs_bmbt_irec	got;
5755 	int			is_empty;
5756 	int			error = 0;
5757 
5758 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
5759 
5760 	if (xfs_is_shutdown(ip->i_mount))
5761 		return -EIO;
5762 
5763 	xfs_ilock(ip, XFS_ILOCK_EXCL);
5764 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5765 	if (!error && !is_empty && got.br_startoff >= off &&
5766 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5767 		error = -EINVAL;
5768 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
5769 
5770 	return error;
5771 }
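
/*
 * Editor-added restatement of the check above: a right shift is rejected
 * when the shifted start offset of the last extent would wrap past the
 * largest file offset a bmbt record can encode.  After masking with
 * BMBT_STARTOFF_MASK the wrapped value compares less than the original
 * offset, which is the overflow signal used in
 * xfs_bmap_can_insert_extents().  This helper is hypothetical and unused.
 */
static inline bool
xfs_bmap_shift_would_wrap(
	xfs_fileoff_t		startoff,
	xfs_fileoff_t		shift)
{
	return ((startoff + shift) & BMBT_STARTOFF_MASK) < startoff;
}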
5772 
5773 int
5774 xfs_bmap_insert_extents(
5775 	struct xfs_trans	*tp,
5776 	struct xfs_inode	*ip,
5777 	xfs_fileoff_t		*next_fsb,
5778 	xfs_fileoff_t		offset_shift_fsb,
5779 	bool			*done,
5780 	xfs_fileoff_t		stop_fsb)
5781 {
5782 	int			whichfork = XFS_DATA_FORK;
5783 	struct xfs_mount	*mp = ip->i_mount;
5784 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5785 	struct xfs_btree_cur	*cur = NULL;
5786 	struct xfs_bmbt_irec	got, next;
5787 	struct xfs_iext_cursor	icur;
5788 	xfs_fileoff_t		new_startoff;
5789 	int			error = 0;
5790 	int			logflags = 0;
5791 
5792 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5793 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5794 		xfs_bmap_mark_sick(ip, whichfork);
5795 		return -EFSCORRUPTED;
5796 	}
5797 
5798 	if (xfs_is_shutdown(mp))
5799 		return -EIO;
5800 
5801 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5802 
5803 	error = xfs_iread_extents(tp, ip, whichfork);
5804 	if (error)
5805 		return error;
5806 
5807 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5808 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5809 
5810 	if (*next_fsb == NULLFSBLOCK) {
5811 		xfs_iext_last(ifp, &icur);
5812 		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5813 		    stop_fsb > got.br_startoff) {
5814 			*done = true;
5815 			goto del_cursor;
5816 		}
5817 	} else {
5818 		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5819 			*done = true;
5820 			goto del_cursor;
5821 		}
5822 	}
5823 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5824 		xfs_bmap_mark_sick(ip, whichfork);
5825 		error = -EFSCORRUPTED;
5826 		goto del_cursor;
5827 	}
5828 
5829 	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
5830 		xfs_bmap_mark_sick(ip, whichfork);
5831 		error = -EFSCORRUPTED;
5832 		goto del_cursor;
5833 	}
5834 
5835 	new_startoff = got.br_startoff + offset_shift_fsb;
5836 	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5837 		if (new_startoff + got.br_blockcount > next.br_startoff) {
5838 			error = -EINVAL;
5839 			goto del_cursor;
5840 		}
5841 
5842 		/*
5843 		 * Unlike a left shift (which involves a hole punch), a right
5844 		 * shift does not modify extent neighbors in any way.  We should
5845 		 * never find mergeable extents in this scenario.  Check anyway
5846 		 * and warn if we encounter two extents that could be one.
5847 		 */
5848 		if (xfs_bmse_can_merge(ip, whichfork, &got, &next,
5849 				offset_shift_fsb))
5850 			WARN_ON_ONCE(1);
5851 	}
5852 
5853 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5854 			cur, &logflags, new_startoff);
5855 	if (error)
5856 		goto del_cursor;
5857 
5858 	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5859 	    stop_fsb >= got.br_startoff + got.br_blockcount) {
5860 		*done = true;
5861 		goto del_cursor;
5862 	}
5863 
5864 	*next_fsb = got.br_startoff;
5865 del_cursor:
5866 	if (cur)
5867 		xfs_btree_del_cursor(cur, error);
5868 	if (logflags)
5869 		xfs_trans_log_inode(tp, ip, logflags);
5870 	return error;
5871 }
5872 
5873 /*
5874  * Split an extent into two extents at the split_fsb block such that the
5875  * new extent starts at split_fsb.  @split_fsb is the file offset block at
5876  * which the extent is split.  If split_fsb lies in a hole or at the first
5877  * block of an existing extent, there is nothing to do and we return 0.
5878  */
5879 int
5880 xfs_bmap_split_extent(
5881 	struct xfs_trans	*tp,
5882 	struct xfs_inode	*ip,
5883 	xfs_fileoff_t		split_fsb)
5884 {
5885 	int				whichfork = XFS_DATA_FORK;
5886 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
5887 	struct xfs_btree_cur		*cur = NULL;
5888 	struct xfs_bmbt_irec		got;
5889 	struct xfs_bmbt_irec		new; /* split extent */
5890 	struct xfs_mount		*mp = ip->i_mount;
5891 	xfs_fsblock_t			gotblkcnt; /* new block count for got */
5892 	struct xfs_iext_cursor		icur;
5893 	int				error = 0;
5894 	int				logflags = 0;
5895 	int				i = 0;
5896 
5897 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5898 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5899 		xfs_bmap_mark_sick(ip, whichfork);
5900 		return -EFSCORRUPTED;
5901 	}
5902 
5903 	if (xfs_is_shutdown(mp))
5904 		return -EIO;
5905 
5906 	/* Read in all the extents */
5907 	error = xfs_iread_extents(tp, ip, whichfork);
5908 	if (error)
5909 		return error;
5910 
5911 	/*
5912 	 * If there are no extents, or split_fsb lies in a hole, we are done.
5913 	 */
5914 	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5915 	    got.br_startoff >= split_fsb)
5916 		return 0;
5917 
5918 	gotblkcnt = split_fsb - got.br_startoff;
5919 	new.br_startoff = split_fsb;
5920 	new.br_startblock = got.br_startblock + gotblkcnt;
5921 	new.br_blockcount = got.br_blockcount - gotblkcnt;
5922 	new.br_state = got.br_state;
5923 
5924 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5925 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5926 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5927 		if (error)
5928 			goto del_cursor;
5929 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5930 			xfs_btree_mark_sick(cur);
5931 			error = -EFSCORRUPTED;
5932 			goto del_cursor;
5933 		}
5934 	}
5935 
5936 	got.br_blockcount = gotblkcnt;
5937 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5938 			&got);
5939 
5940 	logflags = XFS_ILOG_CORE;
5941 	if (cur) {
5942 		error = xfs_bmbt_update(cur, &got);
5943 		if (error)
5944 			goto del_cursor;
5945 	} else
5946 		logflags |= XFS_ILOG_DEXT;
5947 
5948 	/* Add new extent */
5949 	xfs_iext_next(ifp, &icur);
5950 	xfs_iext_insert(ip, &icur, &new, 0);
5951 	ifp->if_nextents++;
5952 
5953 	if (cur) {
5954 		error = xfs_bmbt_lookup_eq(cur, &new, &i);
5955 		if (error)
5956 			goto del_cursor;
5957 		if (XFS_IS_CORRUPT(mp, i != 0)) {
5958 			xfs_btree_mark_sick(cur);
5959 			error = -EFSCORRUPTED;
5960 			goto del_cursor;
5961 		}
5962 		error = xfs_btree_insert(cur, &i);
5963 		if (error)
5964 			goto del_cursor;
5965 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5966 			xfs_btree_mark_sick(cur);
5967 			error = -EFSCORRUPTED;
5968 			goto del_cursor;
5969 		}
5970 	}
5971 
5972 	/*
5973 	 * Convert to a btree if necessary.
5974 	 */
5975 	if (xfs_bmap_needs_btree(ip, whichfork)) {
5976 		int tmp_logflags; /* partial log flag return val */
5977 
5978 		ASSERT(cur == NULL);
5979 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5980 				&tmp_logflags, whichfork);
5981 		logflags |= tmp_logflags;
5982 	}
5983 
5984 del_cursor:
5985 	if (cur) {
5986 		cur->bc_bmap.allocated = 0;
5987 		xfs_btree_del_cursor(cur, error);
5988 	}
5989 
5990 	if (logflags)
5991 		xfs_trans_log_inode(tp, ip, logflags);
5992 	return error;
5993 }
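
/*
 * Editor-added worked example with made-up values: splitting an extent
 * that maps file block 100 to disk block 1000 for 50 blocks at split_fsb
 * 120 leaves the original record covering 20 blocks and creates a new
 * record for the remaining 30, mirroring the arithmetic in
 * xfs_bmap_split_extent() above.  Not called anywhere.
 */
static inline void
xfs_bmap_split_example(void)
{
	struct xfs_bmbt_irec	got = {
		.br_startoff	= 100,
		.br_startblock	= 1000,
		.br_blockcount	= 50,
		.br_state	= XFS_EXT_NORM,
	};
	xfs_fileoff_t		split_fsb = 120;
	xfs_fsblock_t		gotblkcnt = split_fsb - got.br_startoff;
	struct xfs_bmbt_irec	new = {
		.br_startoff	= split_fsb,			 /* 120 */
		.br_startblock	= got.br_startblock + gotblkcnt, /* 1020 */
		.br_blockcount	= got.br_blockcount - gotblkcnt, /* 30 */
		.br_state	= got.br_state,
	};

	got.br_blockcount = gotblkcnt;				 /* 20 */
	ASSERT(got.br_blockcount + new.br_blockcount == 50);
}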
5994 
5995 /* Record a bmap intent. */
5996 static inline void
5997 __xfs_bmap_add(
5998 	struct xfs_trans		*tp,
5999 	enum xfs_bmap_intent_type	type,
6000 	struct xfs_inode		*ip,
6001 	int				whichfork,
6002 	struct xfs_bmbt_irec		*bmap)
6003 {
6004 	struct xfs_bmap_intent		*bi;
6005 
6006 	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
6007 	    bmap->br_startblock == HOLESTARTBLOCK ||
6008 	    bmap->br_startblock == DELAYSTARTBLOCK)
6009 		return;
6010 
6011 	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
6012 	INIT_LIST_HEAD(&bi->bi_list);
6013 	bi->bi_type = type;
6014 	bi->bi_owner = ip;
6015 	bi->bi_whichfork = whichfork;
6016 	bi->bi_bmap = *bmap;
6017 
6018 	xfs_bmap_defer_add(tp, bi);
6019 }
6020 
6021 /* Map an extent into a file. */
6022 void
6023 xfs_bmap_map_extent(
6024 	struct xfs_trans	*tp,
6025 	struct xfs_inode	*ip,
6026 	int			whichfork,
6027 	struct xfs_bmbt_irec	*PREV)
6028 {
6029 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
6030 }
6031 
6032 /* Unmap an extent out of a file. */
6033 void
6034 xfs_bmap_unmap_extent(
6035 	struct xfs_trans	*tp,
6036 	struct xfs_inode	*ip,
6037 	int			whichfork,
6038 	struct xfs_bmbt_irec	*PREV)
6039 {
6040 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
6041 }
6042 
6043 /*
6044  * Process one of the deferred bmap operations recorded by __xfs_bmap_add():
6045  * either remap an extent into the file or unmap an extent out of it.
6046  */
6047 int
6048 xfs_bmap_finish_one(
6049 	struct xfs_trans		*tp,
6050 	struct xfs_bmap_intent		*bi)
6051 {
6052 	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
6053 	int				error = 0;
6054 	int				flags = 0;
6055 
6056 	if (bi->bi_whichfork == XFS_ATTR_FORK)
6057 		flags |= XFS_BMAPI_ATTRFORK;
6058 
6059 	ASSERT(tp->t_highest_agno == NULLAGNUMBER);
6060 
6061 	trace_xfs_bmap_deferred(bi);
6062 
6063 	if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
6064 		return -EIO;
6065 
6066 	switch (bi->bi_type) {
6067 	case XFS_BMAP_MAP:
6068 		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
6069 			flags |= XFS_BMAPI_PREALLOC;
6070 		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6071 				bmap->br_blockcount, bmap->br_startblock,
6072 				flags);
6073 		bmap->br_blockcount = 0;
6074 		break;
6075 	case XFS_BMAP_UNMAP:
6076 		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6077 				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
6078 				1);
6079 		break;
6080 	default:
6081 		ASSERT(0);
6082 		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
6083 		error = -EFSCORRUPTED;
6084 	}
6085 
6086 	return error;
6087 }
6088 
6089 /* Check that an extent does not have invalid flags or bad ranges. */
6090 xfs_failaddr_t
6091 xfs_bmap_validate_extent_raw(
6092 	struct xfs_mount	*mp,
6093 	bool			rtfile,
6094 	int			whichfork,
6095 	struct xfs_bmbt_irec	*irec)
6096 {
6097 	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6098 		return __this_address;
6099 
6100 	if (rtfile && whichfork == XFS_DATA_FORK) {
6101 		if (!xfs_verify_rtbext(mp, irec->br_startblock,
6102 					   irec->br_blockcount))
6103 			return __this_address;
6104 	} else {
6105 		if (!xfs_verify_fsbext(mp, irec->br_startblock,
6106 					   irec->br_blockcount))
6107 			return __this_address;
6108 	}
6109 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6110 		return __this_address;
6111 	return NULL;
6112 }
6113 
6114 int __init
6115 xfs_bmap_intent_init_cache(void)
6116 {
6117 	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6118 			sizeof(struct xfs_bmap_intent),
6119 			0, 0, NULL);
6120 
6121 	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6122 }
6123 
6124 void
6125 xfs_bmap_intent_destroy_cache(void)
6126 {
6127 	kmem_cache_destroy(xfs_bmap_intent_cache);
6128 	xfs_bmap_intent_cache = NULL;
6129 }
6130 
6131 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6132 xfs_failaddr_t
6133 xfs_bmap_validate_extent(
6134 	struct xfs_inode	*ip,
6135 	int			whichfork,
6136 	struct xfs_bmbt_irec	*irec)
6137 {
6138 	return xfs_bmap_validate_extent_raw(ip->i_mount,
6139 			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
6140 }
6141 
6142 /*
6143  * Used in xfs_itruncate_extents().  This is the maximum number of extents
6144  * freed from a file in a single transaction.
6145  */
6146 #define	XFS_ITRUNC_MAX_EXTENTS	2
6147 
6148 /*
6149  * Unmap every extent in part of an inode's fork.  We don't do any higher level
6150  * invalidation work at all.
6151  */
6152 int
6153 xfs_bunmapi_range(
6154 	struct xfs_trans	**tpp,
6155 	struct xfs_inode	*ip,
6156 	uint32_t		flags,
6157 	xfs_fileoff_t		startoff,
6158 	xfs_fileoff_t		endoff)
6159 {
6160 	xfs_filblks_t		unmap_len = endoff - startoff + 1;
6161 	int			error = 0;
6162 
6163 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
6164 
6165 	while (unmap_len > 0) {
6166 		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
6167 		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
6168 				XFS_ITRUNC_MAX_EXTENTS);
6169 		if (error)
6170 			goto out;
6171 
6172 		/* free the just unmapped extents */
6173 		error = xfs_defer_finish(tpp);
6174 		if (error)
6175 			goto out;
6176 		cond_resched();
6177 	}
6178 out:
6179 	return error;
6180 }
6181 
6182 struct xfs_bmap_query_range {
6183 	xfs_bmap_query_range_fn	fn;
6184 	void			*priv;
6185 };
6186 
6187 /* Format btree record and pass to our callback. */
6188 STATIC int
6189 xfs_bmap_query_range_helper(
6190 	struct xfs_btree_cur		*cur,
6191 	const union xfs_btree_rec	*rec,
6192 	void				*priv)
6193 {
6194 	struct xfs_bmap_query_range	*query = priv;
6195 	struct xfs_bmbt_irec		irec;
6196 	xfs_failaddr_t			fa;
6197 
6198 	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
6199 	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
6200 			&irec);
6201 	if (fa) {
6202 		xfs_btree_mark_sick(cur);
6203 		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
6204 				cur->bc_ino.whichfork, fa, &irec);
6205 	}
6206 
6207 	return query->fn(cur, &irec, query->priv);
6208 }
6209 
6210 /* Find all bmaps. */
6211 int
6212 xfs_bmap_query_all(
6213 	struct xfs_btree_cur		*cur,
6214 	xfs_bmap_query_range_fn		fn,
6215 	void				*priv)
6216 {
6217 	struct xfs_bmap_query_range	query = {
6218 		.priv			= priv,
6219 		.fn			= fn,
6220 	};
6221 
6222 	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
6223 }
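
/*
 * Editor-added hypothetical xfs_bmap_query_all() callback: sums the block
 * count of every record in the bmap btree.  The parameters mirror the call
 * made from xfs_bmap_query_range_helper() above; the function and variable
 * names here are invented for illustration.
 */
static inline int
xfs_bmap_sum_blocks_fn(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	void			*priv)
{
	xfs_filblks_t		*blocks = priv;

	*blocks += irec->br_blockcount;
	return 0;
}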
6224 
6225 /* Helper function to extract extent size hint from inode */
6226 xfs_extlen_t
6227 xfs_get_extsz_hint(
6228 	struct xfs_inode	*ip)
6229 {
6230 	/*
6231 	 * No point in aligning allocations if we need to COW to actually
6232 	 * write to them.
6233 	 */
6234 	if (!xfs_is_always_cow_inode(ip) &&
6235 	    (ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
6236 		return ip->i_extsize;
6237 	if (XFS_IS_REALTIME_INODE(ip) &&
6238 	    ip->i_mount->m_sb.sb_rextsize > 1)
6239 		return ip->i_mount->m_sb.sb_rextsize;
6240 	return 0;
6241 }
6242 
6243 /*
6244  * Helper function to extract CoW extent size hint from inode.
6245  * Between the extent size hint and the CoW extent size hint, we
6246  * return the greater of the two.  If the value is zero (automatic),
6247  * use the default size.
6248  */
6249 xfs_extlen_t
6250 xfs_get_cowextsz_hint(
6251 	struct xfs_inode	*ip)
6252 {
6253 	xfs_extlen_t		a, b;
6254 
6255 	a = 0;
6256 	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
6257 		a = ip->i_cowextsize;
6258 	if (XFS_IS_REALTIME_INODE(ip)) {
6259 		b = 0;
6260 		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
6261 			b = ip->i_extsize;
6262 	} else {
6263 		b = xfs_get_extsz_hint(ip);
6264 	}
6265 
6266 	a = max(a, b);
6267 	if (a == 0)
6268 		return XFS_DEFAULT_COWEXTSZ_HINT;
6269 	return a;
6270 }
6271