xref: /linux/fs/xfs/libxfs/xfs_alloc.c (revision 73a8fd93c421c4a6ac2c581c4d3478d3d68a0def)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_defer.h"
15 #include "xfs_btree.h"
16 #include "xfs_rmap.h"
17 #include "xfs_alloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_extent_busy.h"
20 #include "xfs_errortag.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_trans.h"
24 #include "xfs_buf_item.h"
25 #include "xfs_log.h"
26 #include "xfs_ag.h"
27 #include "xfs_ag_resv.h"
28 #include "xfs_bmap.h"
29 #include "xfs_health.h"
30 
31 struct kmem_cache	*xfs_extfree_item_cache;
32 
33 struct workqueue_struct *xfs_alloc_wq;
34 
35 #define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
36 
37 #define	XFSA_FIXUP_BNO_OK	1
38 #define	XFSA_FIXUP_CNT_OK	2
39 
40 /*
41  * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots at
42  * the beginning of the block for a proper header with the location information
43  * and CRC.
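 *
 * Worked example (illustrative, not from the source): with 512-byte
 * sectors on a CRC-enabled filesystem, sizeof(struct xfs_agfl) is 36
 * bytes, so this returns (512 - 36) / sizeof(xfs_agblock_t) = 119 slots.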
44  */
45 unsigned int
46 xfs_agfl_size(
47 	struct xfs_mount	*mp)
48 {
49 	unsigned int		size = mp->m_sb.sb_sectsize;
50 
51 	if (xfs_has_crc(mp))
52 		size -= sizeof(struct xfs_agfl);
53 
54 	return size / sizeof(xfs_agblock_t);
55 }
56 
57 unsigned int
58 xfs_refc_block(
59 	struct xfs_mount	*mp)
60 {
61 	if (xfs_has_rmapbt(mp))
62 		return XFS_RMAP_BLOCK(mp) + 1;
63 	if (xfs_has_finobt(mp))
64 		return XFS_FIBT_BLOCK(mp) + 1;
65 	return XFS_IBT_BLOCK(mp) + 1;
66 }
67 
68 xfs_extlen_t
69 xfs_prealloc_blocks(
70 	struct xfs_mount	*mp)
71 {
72 	if (xfs_has_reflink(mp))
73 		return xfs_refc_block(mp) + 1;
74 	if (xfs_has_rmapbt(mp))
75 		return XFS_RMAP_BLOCK(mp) + 1;
76 	if (xfs_has_finobt(mp))
77 		return XFS_FIBT_BLOCK(mp) + 1;
78 	return XFS_IBT_BLOCK(mp) + 1;
79 }
80 
81 /*
82  * The number of blocks per AG that we withhold from xfs_mod_fdblocks to
83  * guarantee that we can refill the AGFL prior to allocating space in a nearly
84  * full AG.  Although the space described by the free space btrees, the
85  * blocks used by the freesp btrees themselves, and the blocks owned by the
86  * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
87  * free space in the AG drop so low that the free space btrees cannot refill an
88  * empty AGFL up to the minimum level.  Rather than grind through empty AGs
89  * until the fs goes down, we subtract this many AG blocks from the incore
90  * fdblocks to ensure user allocation does not overcommit the space the
91  * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
92  * withhold space from xfs_mod_fdblocks, so we do not account for that here.
93  */
94 #define XFS_ALLOCBT_AGFL_RESERVE	4
95 
96 /*
97  * Compute the number of blocks that we set aside to guarantee the ability to
98  * refill the AGFL and handle a full bmap btree split.
99  *
100  * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
101  * AGF buffer (PV 947395), we place constraints on the relationship among
102  * actual allocations for data blocks, freelist blocks, and potential file data
103  * bmap btree blocks. However, these restrictions may result in no actual space
104  * being allocated for a delayed extent: for example, a data block in some AG
105  * is allocated, but no block is available for the extra bmap btree block that
106  * a split of the file's bmap btree would require. This can lead to an infinite
107  * loop when the file gets flushed to disk and all delayed extents need to be
108  * actually allocated. To get around this, we explicitly set
109  * aside a few blocks which will not be reserved in delayed allocation.
110  *
111  * For each AG, we need to reserve enough blocks to replenish a totally empty
112  * AGFL and 4 more to handle a potential split of the file's bmap btree.
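 *
 * For example (illustrative), a filesystem with 16 AGs sets aside
 * 16 * (XFS_ALLOCBT_AGFL_RESERVE + 4) = 16 * 8 = 128 blocks.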
113  */
114 unsigned int
115 xfs_alloc_set_aside(
116 	struct xfs_mount	*mp)
117 {
118 	return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
119 }
120 
121 /*
122  * When deciding how much space to allocate out of an AG, we limit the
123  * allocation maximum size to the size of the AG. However, we cannot use all the
124  * blocks in the AG - some are permanently used by metadata. These
125  * blocks are generally:
126  *	- the AG superblock, AGF, AGI and AGFL
127  *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
128  *	  the AGI free inode and rmap btree root blocks.
129  *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
130  *	- the rmapbt root block
131  *
132  * The AG headers are sector sized, so the amount of space they take up is
133  * dependent on filesystem geometry. The others are all single blocks.
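 *
 * For example (illustrative), with 4096-byte blocks and 512-byte sectors
 * on a filesystem with finobt, rmapbt and reflink enabled, the four
 * sector-sized headers fit in a single block, so each AG loses
 * 1 + XFS_ALLOCBT_AGFL_RESERVE + 3 + 1 + 1 + 1 = 11 blocks.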
134  */
135 unsigned int
136 xfs_alloc_ag_max_usable(
137 	struct xfs_mount	*mp)
138 {
139 	unsigned int		blocks;
140 
141 	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
142 	blocks += XFS_ALLOCBT_AGFL_RESERVE;
143 	blocks += 3;			/* AGF, AGI btree root blocks */
144 	if (xfs_has_finobt(mp))
145 		blocks++;		/* finobt root block */
146 	if (xfs_has_rmapbt(mp))
147 		blocks++;		/* rmap root block */
148 	if (xfs_has_reflink(mp))
149 		blocks++;		/* refcount root block */
150 
151 	return mp->m_sb.sb_agblocks - blocks;
152 }
153 
154 
155 static int
156 xfs_alloc_lookup(
157 	struct xfs_btree_cur	*cur,
158 	xfs_lookup_t		dir,
159 	xfs_agblock_t		bno,
160 	xfs_extlen_t		len,
161 	int			*stat)
162 {
163 	int			error;
164 
165 	cur->bc_rec.a.ar_startblock = bno;
166 	cur->bc_rec.a.ar_blockcount = len;
167 	error = xfs_btree_lookup(cur, dir, stat);
168 	cur->bc_ag.abt.active = (*stat == 1);
169 	return error;
170 }
171 
172 /*
173  * Lookup the record equal to [bno, len] in the btree given by cur.
174  */
175 static inline int				/* error */
176 xfs_alloc_lookup_eq(
177 	struct xfs_btree_cur	*cur,	/* btree cursor */
178 	xfs_agblock_t		bno,	/* starting block of extent */
179 	xfs_extlen_t		len,	/* length of extent */
180 	int			*stat)	/* success/failure */
181 {
182 	return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
183 }
184 
185 /*
186  * Lookup the first record greater than or equal to [bno, len]
187  * in the btree given by cur.
188  */
189 int				/* error */
190 xfs_alloc_lookup_ge(
191 	struct xfs_btree_cur	*cur,	/* btree cursor */
192 	xfs_agblock_t		bno,	/* starting block of extent */
193 	xfs_extlen_t		len,	/* length of extent */
194 	int			*stat)	/* success/failure */
195 {
196 	return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
197 }
198 
199 /*
200  * Lookup the first record less than or equal to [bno, len]
201  * in the btree given by cur.
202  */
203 int					/* error */
204 xfs_alloc_lookup_le(
205 	struct xfs_btree_cur	*cur,	/* btree cursor */
206 	xfs_agblock_t		bno,	/* starting block of extent */
207 	xfs_extlen_t		len,	/* length of extent */
208 	int			*stat)	/* success/failure */
209 {
210 	return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
211 }
212 
213 static inline bool
214 xfs_alloc_cur_active(
215 	struct xfs_btree_cur	*cur)
216 {
217 	return cur && cur->bc_ag.abt.active;
218 }
219 
220 /*
221  * Update the record referred to by cur to the value given
222  * by [bno, len].
223  * This either works (return 0) or gets an EFSCORRUPTED error.
224  */
225 STATIC int				/* error */
226 xfs_alloc_update(
227 	struct xfs_btree_cur	*cur,	/* btree cursor */
228 	xfs_agblock_t		bno,	/* starting block of extent */
229 	xfs_extlen_t		len)	/* length of extent */
230 {
231 	union xfs_btree_rec	rec;
232 
233 	rec.alloc.ar_startblock = cpu_to_be32(bno);
234 	rec.alloc.ar_blockcount = cpu_to_be32(len);
235 	return xfs_btree_update(cur, &rec);
236 }
237 
238 /* Convert the ondisk btree record to its incore representation. */
239 void
240 xfs_alloc_btrec_to_irec(
241 	const union xfs_btree_rec	*rec,
242 	struct xfs_alloc_rec_incore	*irec)
243 {
244 	irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
245 	irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
246 }
247 
248 /* Simple checks for free space records. */
249 xfs_failaddr_t
250 xfs_alloc_check_irec(
251 	struct xfs_perag			*pag,
252 	const struct xfs_alloc_rec_incore	*irec)
253 {
254 	if (irec->ar_blockcount == 0)
255 		return __this_address;
256 
257 	/* check for valid extent range, including overflow */
258 	if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount))
259 		return __this_address;
260 
261 	return NULL;
262 }
263 
264 static inline int
265 xfs_alloc_complain_bad_rec(
266 	struct xfs_btree_cur		*cur,
267 	xfs_failaddr_t			fa,
268 	const struct xfs_alloc_rec_incore *irec)
269 {
270 	struct xfs_mount		*mp = cur->bc_mp;
271 
272 	xfs_warn(mp,
273 		"%s Freespace BTree record corruption in AG %d detected at %pS!",
274 		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size",
275 		cur->bc_ag.pag->pag_agno, fa);
276 	xfs_warn(mp,
277 		"start block 0x%x block count 0x%x", irec->ar_startblock,
278 		irec->ar_blockcount);
279 	xfs_btree_mark_sick(cur);
280 	return -EFSCORRUPTED;
281 }
282 
283 /*
284  * Get the data from the pointed-to record.
285  */
286 int					/* error */
287 xfs_alloc_get_rec(
288 	struct xfs_btree_cur	*cur,	/* btree cursor */
289 	xfs_agblock_t		*bno,	/* output: starting block of extent */
290 	xfs_extlen_t		*len,	/* output: length of extent */
291 	int			*stat)	/* output: success/failure */
292 {
293 	struct xfs_alloc_rec_incore irec;
294 	union xfs_btree_rec	*rec;
295 	xfs_failaddr_t		fa;
296 	int			error;
297 
298 	error = xfs_btree_get_rec(cur, &rec, stat);
299 	if (error || !(*stat))
300 		return error;
301 
302 	xfs_alloc_btrec_to_irec(rec, &irec);
303 	fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
304 	if (fa)
305 		return xfs_alloc_complain_bad_rec(cur, fa, &irec);
306 
307 	*bno = irec.ar_startblock;
308 	*len = irec.ar_blockcount;
309 	return 0;
310 }
311 
312 /*
313  * Compute aligned version of the found extent.
314  * Takes alignment and min length into account.
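 *
 * For example (illustrative), a found extent [bno 7, len 10] with
 * args->alignment == 4 becomes the aligned extent [8, 9]:
 * aligned_bno = roundup(7, 4) = 8 and reslen = 10 - (8 - 7) = 9.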
315  */
316 STATIC bool
317 xfs_alloc_compute_aligned(
318 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
319 	xfs_agblock_t	foundbno,	/* starting block in found extent */
320 	xfs_extlen_t	foundlen,	/* length in found extent */
321 	xfs_agblock_t	*resbno,	/* result block number */
322 	xfs_extlen_t	*reslen,	/* result length */
323 	unsigned	*busy_gen)
324 {
325 	xfs_agblock_t	bno = foundbno;
326 	xfs_extlen_t	len = foundlen;
327 	xfs_extlen_t	diff;
328 	bool		busy;
329 
330 	/* Trim busy sections out of found extent */
331 	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
332 
333 	/*
334 	 * If we have a largish extent that happens to start before min_agbno,
335 	 * see if we can shift it into range...
336 	 */
337 	if (bno < args->min_agbno && bno + len > args->min_agbno) {
338 		diff = args->min_agbno - bno;
339 		if (len > diff) {
340 			bno += diff;
341 			len -= diff;
342 		}
343 	}
344 
345 	if (args->alignment > 1 && len >= args->minlen) {
346 		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
347 
348 		diff = aligned_bno - bno;
349 
350 		*resbno = aligned_bno;
351 		*reslen = diff >= len ? 0 : len - diff;
352 	} else {
353 		*resbno = bno;
354 		*reslen = len;
355 	}
356 
357 	return busy;
358 }
359 
360 /*
361  * Compute best start block and diff for "near" allocations.
362  * freelen >= wantlen already checked by caller.
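 *
 * For example (illustrative): wantbno 100, wantlen 8, alignment 1 and a
 * free extent [90, 30] yield *newbnop = 100 and a diff of 0, because the
 * freespace extends past wantend and can satisfy the request in place.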
363  */
364 STATIC xfs_extlen_t			/* difference value (absolute) */
365 xfs_alloc_compute_diff(
366 	xfs_agblock_t	wantbno,	/* target starting block */
367 	xfs_extlen_t	wantlen,	/* target length */
368 	xfs_extlen_t	alignment,	/* target alignment */
369 	int		datatype,	/* are we allocating data? */
370 	xfs_agblock_t	freebno,	/* freespace's starting block */
371 	xfs_extlen_t	freelen,	/* freespace's length */
372 	xfs_agblock_t	*newbnop)	/* result: best start block from free */
373 {
374 	xfs_agblock_t	freeend;	/* end of freespace extent */
375 	xfs_agblock_t	newbno1;	/* return block number */
376 	xfs_agblock_t	newbno2;	/* other new block number */
377 	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
378 	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
379 	xfs_agblock_t	wantend;	/* end of target extent */
380 	bool		userdata = datatype & XFS_ALLOC_USERDATA;
381 
382 	ASSERT(freelen >= wantlen);
383 	freeend = freebno + freelen;
384 	wantend = wantbno + wantlen;
385 	/*
386 	 * We want to allocate from the start of a free extent if it is past
387 	 * the desired block or if we are allocating user data and the free
388 	 * extent is before desired block. The second case is there to allow
389 	 * for contiguous allocation from the remaining free space if the file
390 	 * grows in the short term.
391 	 */
392 	if (freebno >= wantbno || (userdata && freeend < wantend)) {
393 		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
394 			newbno1 = NULLAGBLOCK;
395 	} else if (freeend >= wantend && alignment > 1) {
396 		newbno1 = roundup(wantbno, alignment);
397 		newbno2 = newbno1 - alignment;
398 		if (newbno1 >= freeend)
399 			newbno1 = NULLAGBLOCK;
400 		else
401 			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
402 		if (newbno2 < freebno)
403 			newbno2 = NULLAGBLOCK;
404 		else
405 			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
406 		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
407 			if (newlen1 < newlen2 ||
408 			    (newlen1 == newlen2 &&
409 			     XFS_ABSDIFF(newbno1, wantbno) >
410 			     XFS_ABSDIFF(newbno2, wantbno)))
411 				newbno1 = newbno2;
412 		} else if (newbno2 != NULLAGBLOCK)
413 			newbno1 = newbno2;
414 	} else if (freeend >= wantend) {
415 		newbno1 = wantbno;
416 	} else if (alignment > 1) {
417 		newbno1 = roundup(freeend - wantlen, alignment);
418 		if (newbno1 > freeend - wantlen &&
419 		    newbno1 - alignment >= freebno)
420 			newbno1 -= alignment;
421 		else if (newbno1 >= freeend)
422 			newbno1 = NULLAGBLOCK;
423 	} else
424 		newbno1 = freeend - wantlen;
425 	*newbnop = newbno1;
426 	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
427 }
428 
429 /*
430  * Fix up the length, based on mod and prod.
431  * len should be k * prod + mod for some k.
432  * If len is too small it is returned unchanged.
433  * If len hits maxlen it is left alone.
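 *
 * For example (illustrative), with prod 4 and mod 1 a length of 11 is
 * trimmed to 9: k = 11 % 4 = 3 > mod, so rlen = 11 - (3 - 1) = 9, and
 * 9 % 4 == 1 as required.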
434  */
435 STATIC void
436 xfs_alloc_fix_len(
437 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
438 {
439 	xfs_extlen_t	k;
440 	xfs_extlen_t	rlen;
441 
442 	ASSERT(args->mod < args->prod);
443 	rlen = args->len;
444 	ASSERT(rlen >= args->minlen);
445 	ASSERT(rlen <= args->maxlen);
446 	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
447 	    (args->mod == 0 && rlen < args->prod))
448 		return;
449 	k = rlen % args->prod;
450 	if (k == args->mod)
451 		return;
452 	if (k > args->mod)
453 		rlen = rlen - (k - args->mod);
454 	else
455 		rlen = rlen - args->prod + (args->mod - k);
456 	/* casts to (int) catch length underflows */
457 	if ((int)rlen < (int)args->minlen)
458 		return;
459 	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
460 	ASSERT(rlen % args->prod == args->mod);
461 	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
462 		rlen + args->minleft);
463 	args->len = rlen;
464 }
465 
466 /*
467  * Update the two btrees, logically removing from freespace the extent
468  * starting at rbno, rlen blocks.  The extent is contained within the
469  * actual (current) free extent fbno for flen blocks.
470  * Flags are passed in indicating whether the cursors are set to the
471  * relevant records.
472  */
473 STATIC int				/* error code */
474 xfs_alloc_fixup_trees(
475 	struct xfs_btree_cur *cnt_cur,	/* cursor for by-size btree */
476 	struct xfs_btree_cur *bno_cur,	/* cursor for by-block btree */
477 	xfs_agblock_t	fbno,		/* starting block of free extent */
478 	xfs_extlen_t	flen,		/* length of free extent */
479 	xfs_agblock_t	rbno,		/* starting block of returned extent */
480 	xfs_extlen_t	rlen,		/* length of returned extent */
481 	int		flags)		/* flags, XFSA_FIXUP_... */
482 {
483 	int		error;		/* error code */
484 	int		i;		/* operation results */
485 	xfs_agblock_t	nfbno1;		/* first new free startblock */
486 	xfs_agblock_t	nfbno2;		/* second new free startblock */
487 	xfs_extlen_t	nflen1=0;	/* first new free length */
488 	xfs_extlen_t	nflen2=0;	/* second new free length */
489 	struct xfs_mount *mp;
490 
491 	mp = cnt_cur->bc_mp;
492 
493 	/*
494 	 * Look up the record in the by-size tree if necessary.
495 	 */
496 	if (flags & XFSA_FIXUP_CNT_OK) {
497 #ifdef DEBUG
498 		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
499 			return error;
500 		if (XFS_IS_CORRUPT(mp,
501 				   i != 1 ||
502 				   nfbno1 != fbno ||
503 				   nflen1 != flen)) {
504 			xfs_btree_mark_sick(cnt_cur);
505 			return -EFSCORRUPTED;
506 		}
507 #endif
508 	} else {
509 		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
510 			return error;
511 		if (XFS_IS_CORRUPT(mp, i != 1)) {
512 			xfs_btree_mark_sick(cnt_cur);
513 			return -EFSCORRUPTED;
514 		}
515 	}
516 	/*
517 	 * Look up the record in the by-block tree if necessary.
518 	 */
519 	if (flags & XFSA_FIXUP_BNO_OK) {
520 #ifdef DEBUG
521 		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
522 			return error;
523 		if (XFS_IS_CORRUPT(mp,
524 				   i != 1 ||
525 				   nfbno1 != fbno ||
526 				   nflen1 != flen)) {
527 			xfs_btree_mark_sick(bno_cur);
528 			return -EFSCORRUPTED;
529 		}
530 #endif
531 	} else {
532 		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
533 			return error;
534 		if (XFS_IS_CORRUPT(mp, i != 1)) {
535 			xfs_btree_mark_sick(bno_cur);
536 			return -EFSCORRUPTED;
537 		}
538 	}
539 
540 #ifdef DEBUG
541 	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
542 		struct xfs_btree_block	*bnoblock;
543 		struct xfs_btree_block	*cntblock;
544 
545 		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
546 		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);
547 
548 		if (XFS_IS_CORRUPT(mp,
549 				   bnoblock->bb_numrecs !=
550 				   cntblock->bb_numrecs)) {
551 			xfs_btree_mark_sick(bno_cur);
552 			return -EFSCORRUPTED;
553 		}
554 	}
555 #endif
556 
557 	/*
558 	 * Deal with all four cases: the allocated record is contained
559 	 * within the freespace record, so we can have new freespace
560 	 * at either (or both) end, or no freespace remaining.
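	 *
	 * For example (illustrative), allocating [rbno 14, rlen 4] out of the
	 * free extent [fbno 10, flen 20] leaves two new free extents, [10, 4]
	 * and [18, 12].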
561 	 */
562 	if (rbno == fbno && rlen == flen)
563 		nfbno1 = nfbno2 = NULLAGBLOCK;
564 	else if (rbno == fbno) {
565 		nfbno1 = rbno + rlen;
566 		nflen1 = flen - rlen;
567 		nfbno2 = NULLAGBLOCK;
568 	} else if (rbno + rlen == fbno + flen) {
569 		nfbno1 = fbno;
570 		nflen1 = flen - rlen;
571 		nfbno2 = NULLAGBLOCK;
572 	} else {
573 		nfbno1 = fbno;
574 		nflen1 = rbno - fbno;
575 		nfbno2 = rbno + rlen;
576 		nflen2 = (fbno + flen) - nfbno2;
577 	}
578 	/*
579 	 * Delete the entry from the by-size btree.
580 	 */
581 	if ((error = xfs_btree_delete(cnt_cur, &i)))
582 		return error;
583 	if (XFS_IS_CORRUPT(mp, i != 1)) {
584 		xfs_btree_mark_sick(cnt_cur);
585 		return -EFSCORRUPTED;
586 	}
587 	/*
588 	 * Add new by-size btree entry(s).
589 	 */
590 	if (nfbno1 != NULLAGBLOCK) {
591 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
592 			return error;
593 		if (XFS_IS_CORRUPT(mp, i != 0)) {
594 			xfs_btree_mark_sick(cnt_cur);
595 			return -EFSCORRUPTED;
596 		}
597 		if ((error = xfs_btree_insert(cnt_cur, &i)))
598 			return error;
599 		if (XFS_IS_CORRUPT(mp, i != 1)) {
600 			xfs_btree_mark_sick(cnt_cur);
601 			return -EFSCORRUPTED;
602 		}
603 	}
604 	if (nfbno2 != NULLAGBLOCK) {
605 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
606 			return error;
607 		if (XFS_IS_CORRUPT(mp, i != 0)) {
608 			xfs_btree_mark_sick(cnt_cur);
609 			return -EFSCORRUPTED;
610 		}
611 		if ((error = xfs_btree_insert(cnt_cur, &i)))
612 			return error;
613 		if (XFS_IS_CORRUPT(mp, i != 1)) {
614 			xfs_btree_mark_sick(cnt_cur);
615 			return -EFSCORRUPTED;
616 		}
617 	}
618 	/*
619 	 * Fix up the by-block btree entry(s).
620 	 */
621 	if (nfbno1 == NULLAGBLOCK) {
622 		/*
623 		 * No remaining freespace, just delete the by-block tree entry.
624 		 */
625 		if ((error = xfs_btree_delete(bno_cur, &i)))
626 			return error;
627 		if (XFS_IS_CORRUPT(mp, i != 1)) {
628 			xfs_btree_mark_sick(bno_cur);
629 			return -EFSCORRUPTED;
630 		}
631 	} else {
632 		/*
633 		 * Update the by-block entry to start later|be shorter.
634 		 */
635 		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
636 			return error;
637 	}
638 	if (nfbno2 != NULLAGBLOCK) {
639 		/*
640 		 * 2 resulting free entries, need to add one.
641 		 */
642 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
643 			return error;
644 		if (XFS_IS_CORRUPT(mp, i != 0)) {
645 			xfs_btree_mark_sick(bno_cur);
646 			return -EFSCORRUPTED;
647 		}
648 		if ((error = xfs_btree_insert(bno_cur, &i)))
649 			return error;
650 		if (XFS_IS_CORRUPT(mp, i != 1)) {
651 			xfs_btree_mark_sick(bno_cur);
652 			return -EFSCORRUPTED;
653 		}
654 	}
655 	return 0;
656 }
657 
658 /*
659  * We do not verify the AGFL contents against AGF-based index counters here,
660  * even though we may have access to the perag that contains shadow copies. We
661  * don't know if the AGF-based counters have been checked, and if they have,
662  * they may still be inconsistent because they haven't yet been reset on the first
663  * allocation after the AGF has been read in.
664  *
665  * This means we can only check that all agfl entries contain valid or null
666  * values because we can't reliably determine the active range to exclude
667  * NULLAGBLOCK as a valid value.
668  *
669  * However, we can't even do that for v4 format filesystems because there are
670  * old versions of mkfs out there that do not initialise the AGFL to known,
671  * verifiable values. Hence we can't tell the difference between an AGFL block
672  * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
673  *
674  * As a result, we can only fully validate AGFL block numbers when we pull them
675  * from the freelist in xfs_alloc_get_freelist().
676  */
677 static xfs_failaddr_t
678 xfs_agfl_verify(
679 	struct xfs_buf	*bp)
680 {
681 	struct xfs_mount *mp = bp->b_mount;
682 	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
683 	__be32		*agfl_bno = xfs_buf_to_agfl_bno(bp);
684 	int		i;
685 
686 	if (!xfs_has_crc(mp))
687 		return NULL;
688 
689 	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
690 		return __this_address;
691 	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
692 		return __this_address;
693 	/*
694 	 * During growfs operations the perag is not fully initialised, so we
695 	 * can't use it for any useful checking. growfs makes this detectable
696 	 * by using uncached buffers that don't have a perag attached, so we
697 	 * can skip the seqno check in that case.
698 	 */
699 	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
700 		return __this_address;
701 
702 	for (i = 0; i < xfs_agfl_size(mp); i++) {
703 		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
704 		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
705 			return __this_address;
706 	}
707 
708 	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
709 		return __this_address;
710 	return NULL;
711 }
712 
713 static void
714 xfs_agfl_read_verify(
715 	struct xfs_buf	*bp)
716 {
717 	struct xfs_mount *mp = bp->b_mount;
718 	xfs_failaddr_t	fa;
719 
720 	/*
721 	 * There is no verification of non-crc AGFLs because mkfs does not
722 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
723 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
724 	 * can't verify just those entries are valid.
725 	 */
726 	if (!xfs_has_crc(mp))
727 		return;
728 
729 	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
730 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
731 	else {
732 		fa = xfs_agfl_verify(bp);
733 		if (fa)
734 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
735 	}
736 }
737 
738 static void
739 xfs_agfl_write_verify(
740 	struct xfs_buf	*bp)
741 {
742 	struct xfs_mount	*mp = bp->b_mount;
743 	struct xfs_buf_log_item	*bip = bp->b_log_item;
744 	xfs_failaddr_t		fa;
745 
746 	/* no verification of non-crc AGFLs */
747 	if (!xfs_has_crc(mp))
748 		return;
749 
750 	fa = xfs_agfl_verify(bp);
751 	if (fa) {
752 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
753 		return;
754 	}
755 
756 	if (bip)
757 		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
758 
759 	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
760 }
761 
762 const struct xfs_buf_ops xfs_agfl_buf_ops = {
763 	.name = "xfs_agfl",
764 	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
765 	.verify_read = xfs_agfl_read_verify,
766 	.verify_write = xfs_agfl_write_verify,
767 	.verify_struct = xfs_agfl_verify,
768 };
769 
770 /*
771  * Read in the allocation group free block array.
772  */
773 int
774 xfs_alloc_read_agfl(
775 	struct xfs_perag	*pag,
776 	struct xfs_trans	*tp,
777 	struct xfs_buf		**bpp)
778 {
779 	struct xfs_mount	*mp = pag->pag_mount;
780 	struct xfs_buf		*bp;
781 	int			error;
782 
783 	error = xfs_trans_read_buf(
784 			mp, tp, mp->m_ddev_targp,
785 			XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
786 			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
787 	if (xfs_metadata_is_sick(error))
788 		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
789 	if (error)
790 		return error;
791 	xfs_buf_set_ref(bp, XFS_AGFL_REF);
792 	*bpp = bp;
793 	return 0;
794 }
795 
796 STATIC int
797 xfs_alloc_update_counters(
798 	struct xfs_trans	*tp,
799 	struct xfs_buf		*agbp,
800 	long			len)
801 {
802 	struct xfs_agf		*agf = agbp->b_addr;
803 
804 	agbp->b_pag->pagf_freeblks += len;
805 	be32_add_cpu(&agf->agf_freeblks, len);
806 
807 	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
808 		     be32_to_cpu(agf->agf_length))) {
809 		xfs_buf_mark_corrupt(agbp);
810 		xfs_ag_mark_sick(agbp->b_pag, XFS_SICK_AG_AGF);
811 		return -EFSCORRUPTED;
812 	}
813 
814 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
815 	return 0;
816 }
817 
818 /*
819  * Block allocation algorithm and data structures.
820  */
821 struct xfs_alloc_cur {
822 	struct xfs_btree_cur		*cnt;	/* btree cursors */
823 	struct xfs_btree_cur		*bnolt;
824 	struct xfs_btree_cur		*bnogt;
825 	xfs_extlen_t			cur_len;/* current search length */
826 	xfs_agblock_t			rec_bno;/* extent startblock */
827 	xfs_extlen_t			rec_len;/* extent length */
828 	xfs_agblock_t			bno;	/* alloc bno */
829 	xfs_extlen_t			len;	/* alloc len */
830 	xfs_extlen_t			diff;	/* diff from search bno */
831 	unsigned int			busy_gen;/* busy state */
832 	bool				busy;
833 };
834 
835 /*
836  * Set up cursors, etc. in the extent allocation cursor. This function can be
837  * called multiple times to reset an initialized structure without having to
838  * reallocate cursors.
839  */
840 static int
841 xfs_alloc_cur_setup(
842 	struct xfs_alloc_arg	*args,
843 	struct xfs_alloc_cur	*acur)
844 {
845 	int			error;
846 	int			i;
847 
848 	acur->cur_len = args->maxlen;
849 	acur->rec_bno = 0;
850 	acur->rec_len = 0;
851 	acur->bno = 0;
852 	acur->len = 0;
853 	acur->diff = -1;
854 	acur->busy = false;
855 	acur->busy_gen = 0;
856 
857 	/*
858 	 * Perform an initial cntbt lookup to check for availability of maxlen
859 	 * extents. If this fails, we'll return -ENOSPC to signal the caller to
860 	 * attempt a small allocation.
861 	 */
862 	if (!acur->cnt)
863 		acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
864 					args->agbp, args->pag, XFS_BTNUM_CNT);
865 	error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
866 	if (error)
867 		return error;
868 
869 	/*
870 	 * Allocate the bnobt left and right search cursors.
871 	 */
872 	if (!acur->bnolt)
873 		acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
874 					args->agbp, args->pag, XFS_BTNUM_BNO);
875 	if (!acur->bnogt)
876 		acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
877 					args->agbp, args->pag, XFS_BTNUM_BNO);
878 	return i == 1 ? 0 : -ENOSPC;
879 }
880 
881 static void
882 xfs_alloc_cur_close(
883 	struct xfs_alloc_cur	*acur,
884 	bool			error)
885 {
886 	int			cur_error = XFS_BTREE_NOERROR;
887 
888 	if (error)
889 		cur_error = XFS_BTREE_ERROR;
890 
891 	if (acur->cnt)
892 		xfs_btree_del_cursor(acur->cnt, cur_error);
893 	if (acur->bnolt)
894 		xfs_btree_del_cursor(acur->bnolt, cur_error);
895 	if (acur->bnogt)
896 		xfs_btree_del_cursor(acur->bnogt, cur_error);
897 	acur->cnt = acur->bnolt = acur->bnogt = NULL;
898 }
899 
900 /*
901  * Check an extent for allocation and track the best available candidate in the
902  * allocation structure. The cursor is deactivated if it has entered an out of
903  * range state based on allocation arguments. Optionally return the extent
904  * geometry and allocation status if requested by the caller.
905  */
906 static int
907 xfs_alloc_cur_check(
908 	struct xfs_alloc_arg	*args,
909 	struct xfs_alloc_cur	*acur,
910 	struct xfs_btree_cur	*cur,
911 	int			*new)
912 {
913 	int			error, i;
914 	xfs_agblock_t		bno, bnoa, bnew;
915 	xfs_extlen_t		len, lena, diff = -1;
916 	bool			busy;
917 	unsigned		busy_gen = 0;
918 	bool			deactivate = false;
919 	bool			isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
920 
921 	*new = 0;
922 
923 	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
924 	if (error)
925 		return error;
926 	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
927 		xfs_btree_mark_sick(cur);
928 		return -EFSCORRUPTED;
929 	}
930 
931 	/*
932 	 * Check minlen and deactivate a cntbt cursor if out of acceptable size
933 	 * range (i.e., walking backwards looking for a minlen extent).
934 	 */
935 	if (len < args->minlen) {
936 		deactivate = !isbnobt;
937 		goto out;
938 	}
939 
940 	busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
941 					 &busy_gen);
942 	acur->busy |= busy;
943 	if (busy)
944 		acur->busy_gen = busy_gen;
945 	/* deactivate a bnobt cursor outside of locality range */
946 	if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
947 		deactivate = isbnobt;
948 		goto out;
949 	}
950 	if (lena < args->minlen)
951 		goto out;
952 
953 	args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
954 	xfs_alloc_fix_len(args);
955 	ASSERT(args->len >= args->minlen);
956 	if (args->len < acur->len)
957 		goto out;
958 
959 	/*
960 	 * We have an aligned record that satisfies minlen and beats or matches
961 	 * the candidate extent size. Compare locality for near allocation mode.
962 	 */
963 	diff = xfs_alloc_compute_diff(args->agbno, args->len,
964 				      args->alignment, args->datatype,
965 				      bnoa, lena, &bnew);
966 	if (bnew == NULLAGBLOCK)
967 		goto out;
968 
969 	/*
970 	 * Deactivate a bnobt cursor with worse locality than the current best.
971 	 */
972 	if (diff > acur->diff) {
973 		deactivate = isbnobt;
974 		goto out;
975 	}
976 
977 	ASSERT(args->len > acur->len ||
978 	       (args->len == acur->len && diff <= acur->diff));
979 	acur->rec_bno = bno;
980 	acur->rec_len = len;
981 	acur->bno = bnew;
982 	acur->len = args->len;
983 	acur->diff = diff;
984 	*new = 1;
985 
986 	/*
987 	 * We're done if we found a perfect allocation. This only deactivates
988 	 * the current cursor, but this is just an optimization to terminate a
989 	 * cntbt search that otherwise runs to the edge of the tree.
990 	 */
991 	if (acur->diff == 0 && acur->len == args->maxlen)
992 		deactivate = true;
993 out:
994 	if (deactivate)
995 		cur->bc_ag.abt.active = false;
996 	trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
997 				  *new);
998 	return 0;
999 }
1000 
1001 /*
1002  * Complete an allocation of a candidate extent. Remove the extent from both
1003  * trees and update the args structure.
1004  */
1005 STATIC int
1006 xfs_alloc_cur_finish(
1007 	struct xfs_alloc_arg	*args,
1008 	struct xfs_alloc_cur	*acur)
1009 {
1010 	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
1011 	int			error;
1012 
1013 	ASSERT(acur->cnt && acur->bnolt);
1014 	ASSERT(acur->bno >= acur->rec_bno);
1015 	ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
1016 	ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
1017 
1018 	error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
1019 				      acur->rec_len, acur->bno, acur->len, 0);
1020 	if (error)
1021 		return error;
1022 
1023 	args->agbno = acur->bno;
1024 	args->len = acur->len;
1025 	args->wasfromfl = 0;
1026 
1027 	trace_xfs_alloc_cur(args);
1028 	return 0;
1029 }
1030 
1031 /*
1032  * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
1033  * bno optimized lookup to search for extents with ideal size and locality.
1034  */
1035 STATIC int
1036 xfs_alloc_cntbt_iter(
1037 	struct xfs_alloc_arg		*args,
1038 	struct xfs_alloc_cur		*acur)
1039 {
1040 	struct xfs_btree_cur	*cur = acur->cnt;
1041 	xfs_agblock_t		bno;
1042 	xfs_extlen_t		len, cur_len;
1043 	int			error;
1044 	int			i;
1045 
1046 	if (!xfs_alloc_cur_active(cur))
1047 		return 0;
1048 
1049 	/* locality optimized lookup */
1050 	cur_len = acur->cur_len;
1051 	error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
1052 	if (error)
1053 		return error;
1054 	if (i == 0)
1055 		return 0;
1056 	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1057 	if (error)
1058 		return error;
1059 
1060 	/* check the current record and update search length from it */
1061 	error = xfs_alloc_cur_check(args, acur, cur, &i);
1062 	if (error)
1063 		return error;
1064 	ASSERT(len >= acur->cur_len);
1065 	acur->cur_len = len;
1066 
1067 	/*
1068 	 * We looked up the first record >= [agbno, len] above. The agbno is a
1069 	 * secondary key and so the current record may lie just before or after
1070 	 * agbno. If it is past agbno, check the previous record too so long as
1071 	 * the length matches as it may be closer. Don't check a smaller record
1072 	 * because that could deactivate our cursor.
1073 	 */
1074 	if (bno > args->agbno) {
1075 		error = xfs_btree_decrement(cur, 0, &i);
1076 		if (!error && i) {
1077 			error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1078 			if (!error && i && len == acur->cur_len)
1079 				error = xfs_alloc_cur_check(args, acur, cur,
1080 							    &i);
1081 		}
1082 		if (error)
1083 			return error;
1084 	}
1085 
1086 	/*
1087 	 * Increment the search key by one if we have no candidate yet or if
1088 	 * the extent we just checked already reaches the doubled key.
1089 	 * Otherwise, double the search key to optimize the search. Efficiency
1090 	 * is more important here than absolute best locality.
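	 *
	 * For example (illustrative): if the previous key was 4 (doubling to
	 * 8) but the record just checked had length 9, the search key is
	 * simply bumped to 10 rather than set to the smaller doubled value.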
1091 	 */
1092 	cur_len <<= 1;
1093 	if (!acur->len || acur->cur_len >= cur_len)
1094 		acur->cur_len++;
1095 	else
1096 		acur->cur_len = cur_len;
1097 
1098 	return error;
1099 }
1100 
1101 /*
1102  * Deal with the case where only small freespaces remain. Either return the
1103  * contents of the last freespace record, or allocate space from the freelist if
1104  * there is nothing in the tree.
1105  */
1106 STATIC int			/* error */
1107 xfs_alloc_ag_vextent_small(
1108 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
1109 	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
1110 	xfs_agblock_t		*fbnop,	/* result block number */
1111 	xfs_extlen_t		*flenp,	/* result length */
1112 	int			*stat)	/* status: 0-freelist, 1-normal/none */
1113 {
1114 	struct xfs_agf		*agf = args->agbp->b_addr;
1115 	int			error = 0;
1116 	xfs_agblock_t		fbno = NULLAGBLOCK;
1117 	xfs_extlen_t		flen = 0;
1118 	int			i = 0;
1119 
1120 	/*
1121 	 * If a cntbt cursor is provided, try to allocate the largest record in
1122 	 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
1123 	 * allocation. Make sure to respect minleft even when pulling from the
1124 	 * freelist.
1125 	 */
1126 	if (ccur)
1127 		error = xfs_btree_decrement(ccur, 0, &i);
1128 	if (error)
1129 		goto error;
1130 	if (i) {
1131 		error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
1132 		if (error)
1133 			goto error;
1134 		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1135 			xfs_btree_mark_sick(ccur);
1136 			error = -EFSCORRUPTED;
1137 			goto error;
1138 		}
1139 		goto out;
1140 	}
1141 
1142 	if (args->minlen != 1 || args->alignment != 1 ||
1143 	    args->resv == XFS_AG_RESV_AGFL ||
1144 	    be32_to_cpu(agf->agf_flcount) <= args->minleft)
1145 		goto out;
1146 
1147 	error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
1148 			&fbno, 0);
1149 	if (error)
1150 		goto error;
1151 	if (fbno == NULLAGBLOCK)
1152 		goto out;
1153 
1154 	xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
1155 			      (args->datatype & XFS_ALLOC_NOBUSY));
1156 
1157 	if (args->datatype & XFS_ALLOC_USERDATA) {
1158 		struct xfs_buf	*bp;
1159 
1160 		error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
1161 				XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
1162 				args->mp->m_bsize, 0, &bp);
1163 		if (error)
1164 			goto error;
1165 		xfs_trans_binval(args->tp, bp);
1166 	}
1167 	*fbnop = args->agbno = fbno;
1168 	*flenp = args->len = 1;
1169 	if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
1170 		xfs_btree_mark_sick(ccur);
1171 		error = -EFSCORRUPTED;
1172 		goto error;
1173 	}
1174 	args->wasfromfl = 1;
1175 	trace_xfs_alloc_small_freelist(args);
1176 
1177 	/*
1178 	 * If we're feeding an AGFL block to something that doesn't live in the
1179 	 * free space, we need to clear out the OWN_AG rmap.
1180 	 */
1181 	error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
1182 			      &XFS_RMAP_OINFO_AG);
1183 	if (error)
1184 		goto error;
1185 
1186 	*stat = 0;
1187 	return 0;
1188 
1189 out:
1190 	/*
1191 	 * Can't do the allocation, give up.
1192 	 */
1193 	if (flen < args->minlen) {
1194 		args->agbno = NULLAGBLOCK;
1195 		trace_xfs_alloc_small_notenough(args);
1196 		flen = 0;
1197 	}
1198 	*fbnop = fbno;
1199 	*flenp = flen;
1200 	*stat = 1;
1201 	trace_xfs_alloc_small_done(args);
1202 	return 0;
1203 
1204 error:
1205 	trace_xfs_alloc_small_error(args);
1206 	return error;
1207 }
1208 
1209 /*
1210  * Allocate a variable extent at exactly agno/bno.
1211  * Extent's length (returned in *len) will be between minlen and maxlen,
1212  * and of the form k * prod + mod unless there's nothing that large.
1213  * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
1214  */
1215 STATIC int			/* error */
1216 xfs_alloc_ag_vextent_exact(
1217 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
1218 {
1219 	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
1220 	struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
1221 	struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
1222 	int		error;
1223 	xfs_agblock_t	fbno;	/* start block of found extent */
1224 	xfs_extlen_t	flen;	/* length of found extent */
1225 	xfs_agblock_t	tbno;	/* start block of busy extent */
1226 	xfs_extlen_t	tlen;	/* length of busy extent */
1227 	xfs_agblock_t	tend;	/* end block of busy extent */
1228 	int		i;	/* success/failure of operation */
1229 	unsigned	busy_gen;
1230 
1231 	ASSERT(args->alignment == 1);
1232 
1233 	/*
1234 	 * Allocate/initialize a cursor for the by-number freespace btree.
1235 	 */
1236 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1237 					  args->pag, XFS_BTNUM_BNO);
1238 
1239 	/*
1240 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
1241 	 * Look for the closest free block <= bno, it must contain bno
1242 	 * if any free block does.
1243 	 */
1244 	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1245 	if (error)
1246 		goto error0;
1247 	if (!i)
1248 		goto not_found;
1249 
1250 	/*
1251 	 * Grab the freespace record.
1252 	 */
1253 	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
1254 	if (error)
1255 		goto error0;
1256 	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1257 		xfs_btree_mark_sick(bno_cur);
1258 		error = -EFSCORRUPTED;
1259 		goto error0;
1260 	}
1261 	ASSERT(fbno <= args->agbno);
1262 
1263 	/*
1264 	 * Check for overlapping busy extents.
1265 	 */
1266 	tbno = fbno;
1267 	tlen = flen;
1268 	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
1269 
1270 	/*
1271 	 * Give up if the start of the extent is busy, or the freespace isn't
1272 	 * long enough for the minimum request.
1273 	 */
1274 	if (tbno > args->agbno)
1275 		goto not_found;
1276 	if (tlen < args->minlen)
1277 		goto not_found;
1278 	tend = tbno + tlen;
1279 	if (tend < args->agbno + args->minlen)
1280 		goto not_found;
1281 
1282 	/*
1283 	 * End of extent will be smaller of the freespace end and the
1284 	 * maximal requested end.
1285 	 *
1286 	 * Fix the length according to mod and prod if given.
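	 *
	 * For example (illustrative): agbno 50, maxlen 16 and a trimmed
	 * freespace ending at tend 60 give len = min(60, 50 + 16) - 50 = 10.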
1287 	 */
1288 	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1289 						- args->agbno;
1290 	xfs_alloc_fix_len(args);
1291 	ASSERT(args->agbno + args->len <= tend);
1292 
1293 	/*
1294 	 * We are allocating agbno for args->len
1295 	 * Allocate/initialize a cursor for the by-size btree.
1296 	 */
1297 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1298 					args->pag, XFS_BTNUM_CNT);
1299 	ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
1300 	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1301 				      args->len, XFSA_FIXUP_BNO_OK);
1302 	if (error) {
1303 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1304 		goto error0;
1305 	}
1306 
1307 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1308 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1309 
1310 	args->wasfromfl = 0;
1311 	trace_xfs_alloc_exact_done(args);
1312 	return 0;
1313 
1314 not_found:
1315 	/* Didn't find it, return null. */
1316 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1317 	args->agbno = NULLAGBLOCK;
1318 	trace_xfs_alloc_exact_notfound(args);
1319 	return 0;
1320 
1321 error0:
1322 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1323 	trace_xfs_alloc_exact_error(args);
1324 	return error;
1325 }
1326 
1327 /*
1328  * Search a given number of btree records in a given direction. Check each
1329  * record against the good extent we've already found.
1330  */
1331 STATIC int
1332 xfs_alloc_walk_iter(
1333 	struct xfs_alloc_arg	*args,
1334 	struct xfs_alloc_cur	*acur,
1335 	struct xfs_btree_cur	*cur,
1336 	bool			increment,
1337 	bool			find_one, /* quit on first candidate */
1338 	int			count,    /* rec count (-1 for infinite) */
1339 	int			*stat)
1340 {
1341 	int			error;
1342 	int			i;
1343 
1344 	*stat = 0;
1345 
1346 	/*
1347 	 * Search so long as the cursor is active or we find a better extent.
1348 	 * The cursor is deactivated if it extends beyond the range of the
1349 	 * current allocation candidate.
1350 	 */
1351 	while (xfs_alloc_cur_active(cur) && count) {
1352 		error = xfs_alloc_cur_check(args, acur, cur, &i);
1353 		if (error)
1354 			return error;
1355 		if (i == 1) {
1356 			*stat = 1;
1357 			if (find_one)
1358 				break;
1359 		}
1360 		if (!xfs_alloc_cur_active(cur))
1361 			break;
1362 
1363 		if (increment)
1364 			error = xfs_btree_increment(cur, 0, &i);
1365 		else
1366 			error = xfs_btree_decrement(cur, 0, &i);
1367 		if (error)
1368 			return error;
1369 		if (i == 0)
1370 			cur->bc_ag.abt.active = false;
1371 
1372 		if (count > 0)
1373 			count--;
1374 	}
1375 
1376 	return 0;
1377 }
1378 
1379 /*
1380  * Search the by-bno and by-size btrees in parallel in search of an extent with
1381  * ideal locality based on the NEAR mode ->agbno locality hint.
1382  */
1383 STATIC int
1384 xfs_alloc_ag_vextent_locality(
1385 	struct xfs_alloc_arg	*args,
1386 	struct xfs_alloc_cur	*acur,
1387 	int			*stat)
1388 {
1389 	struct xfs_btree_cur	*fbcur = NULL;
1390 	int			error;
1391 	int			i;
1392 	bool			fbinc;
1393 
1394 	ASSERT(acur->len == 0);
1395 
1396 	*stat = 0;
1397 
1398 	error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1399 	if (error)
1400 		return error;
1401 	error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1402 	if (error)
1403 		return error;
1404 	error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1405 	if (error)
1406 		return error;
1407 
1408 	/*
1409 	 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1410 	 * right and lookup the closest extent to the locality hint for each
1411 	 * extent size key in the cntbt. The entire search terminates
1412 	 * immediately on a bnobt hit because that means we've found best case
1413 	 * locality. Otherwise the search continues until the cntbt cursor runs
1414 	 * off the end of the tree. If no allocation candidate is found at this
1415 	 * point, give up on locality, walk backwards from the end of the cntbt
1416 	 * and take the first available extent.
1417 	 *
1418 	 * The parallel tree searches balance each other out to provide fairly
1419 	 * consistent performance for various situations. The bnobt search can
1420 	 * have pathological behavior in the worst case scenario of larger
1421 	 * allocation requests and fragmented free space. On the other hand, the
1422 	 * bnobt is able to satisfy most smaller allocation requests much more
1423 	 * quickly than the cntbt. The cntbt search can sift through fragmented
1424 	 * free space and sets of free extents for larger allocation requests
1425 	 * more quickly than the bnobt. Since the locality hint is just a hint
1426 	 * and we don't want to scan the entire bnobt for perfect locality, the
1427 	 * cntbt search essentially bounds the bnobt search such that we can
1428 	 * find good enough locality at reasonable performance in most cases.
1429 	 */
1430 	while (xfs_alloc_cur_active(acur->bnolt) ||
1431 	       xfs_alloc_cur_active(acur->bnogt) ||
1432 	       xfs_alloc_cur_active(acur->cnt)) {
1433 
1434 		trace_xfs_alloc_cur_lookup(args);
1435 
1436 		/*
1437 		 * Search the bnobt left and right. In the case of a hit, finish
1438 		 * the search in the opposite direction and we're done.
1439 		 */
1440 		error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1441 					    true, 1, &i);
1442 		if (error)
1443 			return error;
1444 		if (i == 1) {
1445 			trace_xfs_alloc_cur_left(args);
1446 			fbcur = acur->bnogt;
1447 			fbinc = true;
1448 			break;
1449 		}
1450 		error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1451 					    1, &i);
1452 		if (error)
1453 			return error;
1454 		if (i == 1) {
1455 			trace_xfs_alloc_cur_right(args);
1456 			fbcur = acur->bnolt;
1457 			fbinc = false;
1458 			break;
1459 		}
1460 
1461 		/*
1462 		 * Check the extent with best locality based on the current
1463 		 * extent size search key and keep track of the best candidate.
1464 		 */
1465 		error = xfs_alloc_cntbt_iter(args, acur);
1466 		if (error)
1467 			return error;
1468 		if (!xfs_alloc_cur_active(acur->cnt)) {
1469 			trace_xfs_alloc_cur_lookup_done(args);
1470 			break;
1471 		}
1472 	}
1473 
1474 	/*
1475 	 * If we failed to find anything due to busy extents, return empty
1476 	 * handed so the caller can flush and retry. If no busy extents were
1477 	 * found, walk backwards from the end of the cntbt as a last resort.
1478 	 */
1479 	if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1480 		error = xfs_btree_decrement(acur->cnt, 0, &i);
1481 		if (error)
1482 			return error;
1483 		if (i) {
1484 			acur->cnt->bc_ag.abt.active = true;
1485 			fbcur = acur->cnt;
1486 			fbinc = false;
1487 		}
1488 	}
1489 
1490 	/*
1491 	 * Search in the opposite direction for a better entry in the case of
1492 	 * a bnobt hit or walk backwards from the end of the cntbt.
1493 	 */
1494 	if (fbcur) {
1495 		error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1496 					    &i);
1497 		if (error)
1498 			return error;
1499 	}
1500 
1501 	if (acur->len)
1502 		*stat = 1;
1503 
1504 	return 0;
1505 }
1506 
1507 /* Check the last block of the cnt btree for allocations. */
1508 static int
1509 xfs_alloc_ag_vextent_lastblock(
1510 	struct xfs_alloc_arg	*args,
1511 	struct xfs_alloc_cur	*acur,
1512 	xfs_agblock_t		*bno,
1513 	xfs_extlen_t		*len,
1514 	bool			*allocated)
1515 {
1516 	int			error;
1517 	int			i;
1518 
1519 #ifdef DEBUG
1520 	/* Randomly don't execute the first algorithm. */
1521 	if (get_random_u32_below(2))
1522 		return 0;
1523 #endif
1524 
1525 	/*
1526 	 * Start from the entry that lookup found, sequence through all larger
1527 	 * free blocks.  If we're actually pointing at a record smaller than
1528 	 * maxlen, go to the start of this block, and skip all those smaller
1529 	 * than minlen.
1530 	 */
1531 	if (*len || args->alignment > 1) {
1532 		acur->cnt->bc_levels[0].ptr = 1;
1533 		do {
1534 			error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1535 			if (error)
1536 				return error;
1537 			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1538 				xfs_btree_mark_sick(acur->cnt);
1539 				return -EFSCORRUPTED;
1540 			}
1541 			if (*len >= args->minlen)
1542 				break;
1543 			error = xfs_btree_increment(acur->cnt, 0, &i);
1544 			if (error)
1545 				return error;
1546 		} while (i);
1547 		ASSERT(*len >= args->minlen);
1548 		if (!i)
1549 			return 0;
1550 	}
1551 
1552 	error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1553 	if (error)
1554 		return error;
1555 
1556 	/*
1557 	 * It didn't work.  We COULD be in a case where there's a good record
1558 	 * somewhere, so try again.
1559 	 */
1560 	if (acur->len == 0)
1561 		return 0;
1562 
1563 	trace_xfs_alloc_near_first(args);
1564 	*allocated = true;
1565 	return 0;
1566 }
1567 
1568 /*
1569  * Allocate a variable extent near bno in the allocation group agno.
1570  * Extent's length (returned in len) will be between minlen and maxlen,
1571  * and of the form k * prod + mod unless there's nothing that large.
1572  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1573  */
1574 STATIC int
1575 xfs_alloc_ag_vextent_near(
1576 	struct xfs_alloc_arg	*args,
1577 	uint32_t		alloc_flags)
1578 {
1579 	struct xfs_alloc_cur	acur = {};
1580 	int			error;		/* error code */
1581 	int			i;		/* result code, temporary */
1582 	xfs_agblock_t		bno;
1583 	xfs_extlen_t		len;
1584 
1585 	/* handle uninitialized agbno range so caller doesn't have to */
1586 	if (!args->min_agbno && !args->max_agbno)
1587 		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1588 	ASSERT(args->min_agbno <= args->max_agbno);
1589 
1590 	/* clamp agbno to the range if it's outside */
1591 	if (args->agbno < args->min_agbno)
1592 		args->agbno = args->min_agbno;
1593 	if (args->agbno > args->max_agbno)
1594 		args->agbno = args->max_agbno;
1595 
1596 	/* Retry once quickly if we find busy extents before blocking. */
1597 	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1598 restart:
1599 	len = 0;
1600 
1601 	/*
1602 	 * Set up cursors and see if there are any free extents as big as
1603 	 * maxlen. If not, pick the last entry in the tree unless the tree is
1604 	 * empty.
1605 	 */
1606 	error = xfs_alloc_cur_setup(args, &acur);
1607 	if (error == -ENOSPC) {
1608 		error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1609 				&len, &i);
1610 		if (error)
1611 			goto out;
1612 		if (i == 0 || len == 0) {
1613 			trace_xfs_alloc_near_noentry(args);
1614 			goto out;
1615 		}
1616 		ASSERT(i == 1);
1617 	} else if (error) {
1618 		goto out;
1619 	}
1620 
1621 	/*
1622 	 * First algorithm.
1623 	 * If the requested extent is large wrt the freespaces available
1624 	 * in this a.g., then the cursor will be pointing to a btree entry
1625 	 * near the right edge of the tree.  If it's in the last btree leaf
1626 	 * block, then we just examine all the entries in that block
1627 	 * that are big enough, and pick the best one.
1628 	 */
1629 	if (xfs_btree_islastblock(acur.cnt, 0)) {
1630 		bool		allocated = false;
1631 
1632 		error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1633 				&allocated);
1634 		if (error)
1635 			goto out;
1636 		if (allocated)
1637 			goto alloc_finish;
1638 	}
1639 
1640 	/*
1641 	 * Second algorithm. Combined cntbt and bnobt search to find ideal
1642 	 * locality.
1643 	 */
1644 	error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1645 	if (error)
1646 		goto out;
1647 
1648 	/*
1649 	 * If we couldn't get anything, give up.
1650 	 */
1651 	if (!acur.len) {
1652 		if (acur.busy) {
1653 			/*
1654 			 * Our only valid extents must have been busy. Flush and
1655 			 * retry the allocation again. If we get an -EAGAIN
1656 			 * error, we're being told that a deadlock was avoided
1657 			 * and the current transaction needs committing before
1658 			 * the allocation can be retried.
1659 			 */
1660 			trace_xfs_alloc_near_busy(args);
1661 			error = xfs_extent_busy_flush(args->tp, args->pag,
1662 					acur.busy_gen, alloc_flags);
1663 			if (error)
1664 				goto out;
1665 
1666 			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1667 			goto restart;
1668 		}
1669 		trace_xfs_alloc_size_neither(args);
1670 		args->agbno = NULLAGBLOCK;
1671 		goto out;
1672 	}
1673 
1674 alloc_finish:
1675 	/* fix up btrees on a successful allocation */
1676 	error = xfs_alloc_cur_finish(args, &acur);
1677 
1678 out:
1679 	xfs_alloc_cur_close(&acur, error);
1680 	return error;
1681 }
1682 
1683 /*
1684  * Allocate a variable extent anywhere in the allocation group agno.
1685  * Extent's length (returned in len) will be between minlen and maxlen,
1686  * and of the form k * prod + mod unless there's nothing that large.
1687  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1688  */
1689 static int
1690 xfs_alloc_ag_vextent_size(
1691 	struct xfs_alloc_arg	*args,
1692 	uint32_t		alloc_flags)
1693 {
1694 	struct xfs_agf		*agf = args->agbp->b_addr;
1695 	struct xfs_btree_cur	*bno_cur;
1696 	struct xfs_btree_cur	*cnt_cur;
1697 	xfs_agblock_t		fbno;		/* start of found freespace */
1698 	xfs_extlen_t		flen;		/* length of found freespace */
1699 	xfs_agblock_t		rbno;		/* returned block number */
1700 	xfs_extlen_t		rlen;		/* length of returned extent */
1701 	bool			busy;
1702 	unsigned		busy_gen;
1703 	int			error;
1704 	int			i;
1705 
1706 	/* Retry once quickly if we find busy extents before blocking. */
1707 	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1708 restart:
1709 	/*
1710 	 * Allocate and initialize a cursor for the by-size btree.
1711 	 */
1712 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1713 					args->pag, XFS_BTNUM_CNT);
1714 	bno_cur = NULL;
1715 
1716 	/*
1717 	 * Look for an entry >= maxlen+alignment-1 blocks.
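	 *
	 * For example (illustrative), with maxlen 8 and alignment 4 we look
	 * for a free extent of at least 11 blocks; wherever such an extent
	 * starts, it is guaranteed to contain an aligned run of 8 blocks.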
1718 	 */
1719 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1720 			args->maxlen + args->alignment - 1, &i)))
1721 		goto error0;
1722 
1723 	/*
1724 	 * If none then we have to settle for a smaller extent. In the case that
1725 	 * there are no large extents, this will return the last entry in the
1726 	 * tree unless the tree is empty. In the case that there are only busy
1727 	 * large extents, this will return the largest small extent unless there
1728 	 * are no smaller extents available.
1729 	 */
1730 	if (!i) {
1731 		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1732 						   &fbno, &flen, &i);
1733 		if (error)
1734 			goto error0;
1735 		if (i == 0 || flen == 0) {
1736 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1737 			trace_xfs_alloc_size_noentry(args);
1738 			return 0;
1739 		}
1740 		ASSERT(i == 1);
1741 		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1742 				&rlen, &busy_gen);
1743 	} else {
1744 		/*
1745 		 * Search for a non-busy extent that is large enough.
1746 		 */
1747 		for (;;) {
1748 			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1749 			if (error)
1750 				goto error0;
1751 			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1752 				xfs_btree_mark_sick(cnt_cur);
1753 				error = -EFSCORRUPTED;
1754 				goto error0;
1755 			}
1756 
1757 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1758 					&rbno, &rlen, &busy_gen);
1759 
1760 			if (rlen >= args->maxlen)
1761 				break;
1762 
1763 			error = xfs_btree_increment(cnt_cur, 0, &i);
1764 			if (error)
1765 				goto error0;
1766 			if (i)
1767 				continue;
1768 
1769 			/*
1770 			 * Our only valid extents must have been busy. Flush and
1771 			 * retry the allocation again. If we get an -EAGAIN
1772 			 * error, we're being told that a deadlock was avoided
1773 			 * and the current transaction needs committing before
1774 			 * the allocation can be retried.
1775 			 */
1776 			trace_xfs_alloc_size_busy(args);
1777 			error = xfs_extent_busy_flush(args->tp, args->pag,
1778 					busy_gen, alloc_flags);
1779 			if (error)
1780 				goto error0;
1781 
1782 			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1783 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1784 			goto restart;
1785 		}
1786 	}
1787 
1788 	/*
1789 	 * In the first case above, we got the last entry in the
1790 	 * by-size btree.  Now we check to see if the space hits maxlen
1791 	 * once aligned; if not, we search left for something better.
1792 	 * This can't happen in the second case above.
1793 	 */
1794 	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1795 	if (XFS_IS_CORRUPT(args->mp,
1796 			   rlen != 0 &&
1797 			   (rlen > flen ||
1798 			    rbno + rlen > fbno + flen))) {
1799 		xfs_btree_mark_sick(cnt_cur);
1800 		error = -EFSCORRUPTED;
1801 		goto error0;
1802 	}
1803 	if (rlen < args->maxlen) {
1804 		xfs_agblock_t	bestfbno;
1805 		xfs_extlen_t	bestflen;
1806 		xfs_agblock_t	bestrbno;
1807 		xfs_extlen_t	bestrlen;
1808 
1809 		bestrlen = rlen;
1810 		bestrbno = rbno;
1811 		bestflen = flen;
1812 		bestfbno = fbno;
1813 		for (;;) {
1814 			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1815 				goto error0;
1816 			if (i == 0)
1817 				break;
1818 			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1819 					&i)))
1820 				goto error0;
1821 			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1822 				xfs_btree_mark_sick(cnt_cur);
1823 				error = -EFSCORRUPTED;
1824 				goto error0;
1825 			}
1826 			if (flen < bestrlen)
1827 				break;
1828 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1829 					&rbno, &rlen, &busy_gen);
1830 			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1831 			if (XFS_IS_CORRUPT(args->mp,
1832 					   rlen != 0 &&
1833 					   (rlen > flen ||
1834 					    rbno + rlen > fbno + flen))) {
1835 				xfs_btree_mark_sick(cnt_cur);
1836 				error = -EFSCORRUPTED;
1837 				goto error0;
1838 			}
1839 			if (rlen > bestrlen) {
1840 				bestrlen = rlen;
1841 				bestrbno = rbno;
1842 				bestflen = flen;
1843 				bestfbno = fbno;
1844 				if (rlen == args->maxlen)
1845 					break;
1846 			}
1847 		}
1848 		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1849 				&i)))
1850 			goto error0;
1851 		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1852 			xfs_btree_mark_sick(cnt_cur);
1853 			error = -EFSCORRUPTED;
1854 			goto error0;
1855 		}
1856 		rlen = bestrlen;
1857 		rbno = bestrbno;
1858 		flen = bestflen;
1859 		fbno = bestfbno;
1860 	}
1861 	args->wasfromfl = 0;
1862 	/*
1863 	 * Fix up the length.
1864 	 */
1865 	args->len = rlen;
1866 	if (rlen < args->minlen) {
1867 		if (busy) {
1868 			/*
1869 			 * Our only valid extents must have been busy. Flush and
1870 			 * retry the allocation again. If we get an -EAGAIN
1871 			 * error, we're being told that a deadlock was avoided
1872 			 * and the current transaction needs committing before
1873 			 * the allocation can be retried.
1874 			 */
1875 			trace_xfs_alloc_size_busy(args);
1876 			error = xfs_extent_busy_flush(args->tp, args->pag,
1877 					busy_gen, alloc_flags);
1878 			if (error)
1879 				goto error0;
1880 
1881 			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1882 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1883 			goto restart;
1884 		}
1885 		goto out_nominleft;
1886 	}
1887 	xfs_alloc_fix_len(args);
1888 
1889 	rlen = args->len;
1890 	if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1891 		xfs_btree_mark_sick(cnt_cur);
1892 		error = -EFSCORRUPTED;
1893 		goto error0;
1894 	}
1895 	/*
1896 	 * Allocate and initialize a cursor for the by-block tree.
1897 	 */
1898 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1899 					args->pag, XFS_BTNUM_BNO);
1900 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1901 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
1902 		goto error0;
1903 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1904 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1905 	cnt_cur = bno_cur = NULL;
1906 	args->len = rlen;
1907 	args->agbno = rbno;
1908 	if (XFS_IS_CORRUPT(args->mp,
1909 			   args->agbno + args->len >
1910 			   be32_to_cpu(agf->agf_length))) {
1911 		xfs_ag_mark_sick(args->pag, XFS_SICK_AG_BNOBT);
1912 		error = -EFSCORRUPTED;
1913 		goto error0;
1914 	}
1915 	trace_xfs_alloc_size_done(args);
1916 	return 0;
1917 
1918 error0:
1919 	trace_xfs_alloc_size_error(args);
1920 	if (cnt_cur)
1921 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1922 	if (bno_cur)
1923 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1924 	return error;
1925 
1926 out_nominleft:
1927 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1928 	trace_xfs_alloc_size_nominleft(args);
1929 	args->agbno = NULLAGBLOCK;
1930 	return 0;
1931 }
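
/*
 * Illustrative userspace model of the by-size lookup strategy above: find
 * the first free extent whose length is at least maxlen + alignment - 1 in
 * an array sorted by ascending length.  Names below are invented for the
 * sketch; the real lookup walks the cntbt via xfs_alloc_lookup_ge().
 */
#include <stddef.h>
#include <stdint.h>

struct free_rec { uint32_t bno; uint32_t len; };

static size_t lookup_ge_len(const struct free_rec *recs, size_t nrecs,
			    uint32_t want)
{
	size_t lo = 0, hi = nrecs;

	/*
	 * Binary search for the lower bound; a result of nrecs means no
	 * record is large enough, analogous to *stat == 0 above.
	 */
	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (recs[mid].len < want)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}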
1932 
1933 /*
1934  * Free the extent starting at agno/bno for length.
1935  */
1936 STATIC int
1937 xfs_free_ag_extent(
1938 	struct xfs_trans		*tp,
1939 	struct xfs_buf			*agbp,
1940 	xfs_agnumber_t			agno,
1941 	xfs_agblock_t			bno,
1942 	xfs_extlen_t			len,
1943 	const struct xfs_owner_info	*oinfo,
1944 	enum xfs_ag_resv_type		type)
1945 {
1946 	struct xfs_mount		*mp;
1947 	struct xfs_btree_cur		*bno_cur;
1948 	struct xfs_btree_cur		*cnt_cur;
1949 	xfs_agblock_t			gtbno; /* start of right neighbor */
1950 	xfs_extlen_t			gtlen; /* length of right neighbor */
1951 	xfs_agblock_t			ltbno; /* start of left neighbor */
1952 	xfs_extlen_t			ltlen; /* length of left neighbor */
1953 	xfs_agblock_t			nbno; /* new starting block of freesp */
1954 	xfs_extlen_t			nlen; /* new length of freespace */
1955 	int				haveleft; /* have a left neighbor */
1956 	int				haveright; /* have a right neighbor */
1957 	int				i;
1958 	int				error;
1959 	struct xfs_perag		*pag = agbp->b_pag;
1960 
1961 	bno_cur = cnt_cur = NULL;
1962 	mp = tp->t_mountp;
1963 
1964 	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1965 		error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
1966 		if (error)
1967 			goto error0;
1968 	}
1969 
1970 	/*
1971 	 * Allocate and initialize a cursor for the by-block btree.
1972 	 */
1973 	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);
1974 	/*
1975 	 * Look for a neighboring block on the left (lower block numbers)
1976 	 * that is contiguous with this space.
1977 	 */
1978 	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1979 		goto error0;
1980 	if (haveleft) {
1981 		/*
1982 		 * There is a block to our left.
1983 		 */
1984 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1985 			goto error0;
1986 		if (XFS_IS_CORRUPT(mp, i != 1)) {
1987 			xfs_btree_mark_sick(bno_cur);
1988 			error = -EFSCORRUPTED;
1989 			goto error0;
1990 		}
1991 		/*
1992 		 * It's not contiguous, though.
1993 		 */
1994 		if (ltbno + ltlen < bno)
1995 			haveleft = 0;
1996 		else {
1997 			/*
1998 			 * If this failure happens the request to free this
1999 			 * space was invalid, it's (partly) already free.
2000 			 * Very bad.
2001 			 */
2002 			if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
2003 				xfs_btree_mark_sick(bno_cur);
2004 				error = -EFSCORRUPTED;
2005 				goto error0;
2006 			}
2007 		}
2008 	}
2009 	/*
2010 	 * Look for a neighboring block on the right (higher block numbers)
2011 	 * that is contiguous with this space.
2012 	 */
2013 	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
2014 		goto error0;
2015 	if (haveright) {
2016 		/*
2017 		 * There is a block to our right.
2018 		 */
2019 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
2020 			goto error0;
2021 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2022 			xfs_btree_mark_sick(bno_cur);
2023 			error = -EFSCORRUPTED;
2024 			goto error0;
2025 		}
2026 		/*
2027 		 * It's not contiguous, though.
2028 		 */
2029 		if (bno + len < gtbno)
2030 			haveright = 0;
2031 		else {
2032 			/*
2033 			 * If this failure happens the request to free this
2034 			 * space was invalid, it's (partly) already free.
2035 			 * Very bad.
2036 			 */
2037 			if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
2038 				xfs_btree_mark_sick(bno_cur);
2039 				error = -EFSCORRUPTED;
2040 				goto error0;
2041 			}
2042 		}
2043 	}
2044 	/*
2045 	 * Now allocate and initialize a cursor for the by-size tree.
2046 	 */
2047 	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
2048 	/*
2049 	 * Have both left and right contiguous neighbors.
2050 	 * Merge all three into a single free block.
2051 	 */
2052 	if (haveleft && haveright) {
2053 		/*
2054 		 * Delete the old by-size entry on the left.
2055 		 */
2056 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2057 			goto error0;
2058 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2059 			xfs_btree_mark_sick(cnt_cur);
2060 			error = -EFSCORRUPTED;
2061 			goto error0;
2062 		}
2063 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2064 			goto error0;
2065 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2066 			xfs_btree_mark_sick(cnt_cur);
2067 			error = -EFSCORRUPTED;
2068 			goto error0;
2069 		}
2070 		/*
2071 		 * Delete the old by-size entry on the right.
2072 		 */
2073 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2074 			goto error0;
2075 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2076 			xfs_btree_mark_sick(cnt_cur);
2077 			error = -EFSCORRUPTED;
2078 			goto error0;
2079 		}
2080 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2081 			goto error0;
2082 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2083 			xfs_btree_mark_sick(cnt_cur);
2084 			error = -EFSCORRUPTED;
2085 			goto error0;
2086 		}
2087 		/*
2088 		 * Delete the old by-block entry for the right block.
2089 		 */
2090 		if ((error = xfs_btree_delete(bno_cur, &i)))
2091 			goto error0;
2092 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2093 			xfs_btree_mark_sick(bno_cur);
2094 			error = -EFSCORRUPTED;
2095 			goto error0;
2096 		}
2097 		/*
2098 		 * Move the by-block cursor back to the left neighbor.
2099 		 */
2100 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2101 			goto error0;
2102 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2103 			xfs_btree_mark_sick(bno_cur);
2104 			error = -EFSCORRUPTED;
2105 			goto error0;
2106 		}
2107 #ifdef DEBUG
2108 		/*
2109 		 * Check that this is the right record: delete didn't
2110 		 * mangle the cursor.
2111 		 */
2112 		{
2113 			xfs_agblock_t	xxbno;
2114 			xfs_extlen_t	xxlen;
2115 
2116 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2117 					&i)))
2118 				goto error0;
2119 			if (XFS_IS_CORRUPT(mp,
2120 					   i != 1 ||
2121 					   xxbno != ltbno ||
2122 					   xxlen != ltlen)) {
2123 				xfs_btree_mark_sick(bno_cur);
2124 				error = -EFSCORRUPTED;
2125 				goto error0;
2126 			}
2127 		}
2128 #endif
2129 		/*
2130 		 * Update remaining by-block entry to the new, joined block.
2131 		 */
2132 		nbno = ltbno;
2133 		nlen = len + ltlen + gtlen;
2134 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2135 			goto error0;
2136 	}
2137 	/*
2138 	 * Have only a left contiguous neighbor.
2139 	 * Merge it together with the new freespace.
2140 	 */
2141 	else if (haveleft) {
2142 		/*
2143 		 * Delete the old by-size entry on the left.
2144 		 */
2145 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2146 			goto error0;
2147 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2148 			xfs_btree_mark_sick(cnt_cur);
2149 			error = -EFSCORRUPTED;
2150 			goto error0;
2151 		}
2152 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2153 			goto error0;
2154 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2155 			xfs_btree_mark_sick(cnt_cur);
2156 			error = -EFSCORRUPTED;
2157 			goto error0;
2158 		}
2159 		/*
2160 		 * Back up the by-block cursor to the left neighbor, and
2161 		 * update its length.
2162 		 */
2163 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2164 			goto error0;
2165 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2166 			xfs_btree_mark_sick(bno_cur);
2167 			error = -EFSCORRUPTED;
2168 			goto error0;
2169 		}
2170 		nbno = ltbno;
2171 		nlen = len + ltlen;
2172 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2173 			goto error0;
2174 	}
2175 	/*
2176 	 * Have only a right contiguous neighbor.
2177 	 * Merge it together with the new freespace.
2178 	 */
2179 	else if (haveright) {
2180 		/*
2181 		 * Delete the old by-size entry on the right.
2182 		 */
2183 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2184 			goto error0;
2185 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2186 			xfs_btree_mark_sick(cnt_cur);
2187 			error = -EFSCORRUPTED;
2188 			goto error0;
2189 		}
2190 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2191 			goto error0;
2192 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2193 			xfs_btree_mark_sick(cnt_cur);
2194 			error = -EFSCORRUPTED;
2195 			goto error0;
2196 		}
2197 		/*
2198 		 * Update the starting block and length of the right
2199 		 * neighbor in the by-block tree.
2200 		 */
2201 		nbno = bno;
2202 		nlen = len + gtlen;
2203 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2204 			goto error0;
2205 	}
2206 	/*
2207 	 * No contiguous neighbors.
2208 	 * Insert the new freespace into the by-block tree.
2209 	 */
2210 	else {
2211 		nbno = bno;
2212 		nlen = len;
2213 		if ((error = xfs_btree_insert(bno_cur, &i)))
2214 			goto error0;
2215 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2216 			xfs_btree_mark_sick(bno_cur);
2217 			error = -EFSCORRUPTED;
2218 			goto error0;
2219 		}
2220 	}
2221 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2222 	bno_cur = NULL;
2223 	/*
2224 	 * In all cases we need to insert the new freespace in the by-size tree.
2225 	 */
2226 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2227 		goto error0;
2228 	if (XFS_IS_CORRUPT(mp, i != 0)) {
2229 		xfs_btree_mark_sick(cnt_cur);
2230 		error = -EFSCORRUPTED;
2231 		goto error0;
2232 	}
2233 	if ((error = xfs_btree_insert(cnt_cur, &i)))
2234 		goto error0;
2235 	if (XFS_IS_CORRUPT(mp, i != 1)) {
2236 		xfs_btree_mark_sick(cnt_cur);
2237 		error = -EFSCORRUPTED;
2238 		goto error0;
2239 	}
2240 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2241 	cnt_cur = NULL;
2242 
2243 	/*
2244 	 * Update the freespace totals in the ag and superblock.
2245 	 */
2246 	error = xfs_alloc_update_counters(tp, agbp, len);
2247 	xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
2248 	if (error)
2249 		goto error0;
2250 
2251 	XFS_STATS_INC(mp, xs_freex);
2252 	XFS_STATS_ADD(mp, xs_freeb, len);
2253 
2254 	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2255 
2256 	return 0;
2257 
2258  error0:
2259 	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2260 	if (bno_cur)
2261 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2262 	if (cnt_cur)
2263 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2264 	return error;
2265 }
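
/*
 * The four merge cases above reduce to plain interval arithmetic.  A
 * minimal userspace sketch, assuming the caller already looked up the
 * (optional) neighbors and verified they do not overlap the freed range
 * (the XFS_IS_CORRUPT checks in xfs_free_ag_extent()):
 */
#include <stdbool.h>
#include <stdint.h>

struct extent { uint32_t bno; uint32_t len; };

static struct extent coalesce(uint32_t bno, uint32_t len,
			      const struct extent *left,
			      const struct extent *right)
{
	struct extent n = { .bno = bno, .len = len };
	bool haveleft = left && left->bno + left->len == bno;
	bool haveright = right && bno + len == right->bno;

	if (haveleft) {		/* absorb the left neighbor */
		n.bno = left->bno;
		n.len += left->len;
	}
	if (haveright)		/* absorb the right neighbor */
		n.len += right->len;
	return n;
}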
2266 
2267 /*
2268  * Visible (exported) allocation/free functions.
2269  * Some of these are used just by xfs_alloc_btree.c and this file.
2270  */
2271 
2272 /*
2273  * Compute and fill in value of m_alloc_maxlevels.
2274  */
2275 void
2276 xfs_alloc_compute_maxlevels(
2277 	xfs_mount_t	*mp)	/* file system mount structure */
2278 {
2279 	mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2280 			(mp->m_sb.sb_agblocks + 1) / 2);
2281 	ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2282 }
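
/*
 * A sketch of the underlying computation, assuming it mirrors
 * xfs_btree_compute_maxlevels() (see fs/xfs/libxfs/xfs_btree.c for the
 * authoritative helper): repeatedly divide the record count by the
 * minimum records per block until a single (root) block remains.
 */
static unsigned int compute_maxlevels_sketch(const unsigned int *minrecs,
					     unsigned long long len)
{
	unsigned long long	maxblocks;
	unsigned int		level;

	maxblocks = (len + minrecs[0] - 1) / minrecs[0];
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minrecs[1] - 1) / minrecs[1];
	return level;
}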
2283 
2284 /*
2285  * Find the length of the longest extent in an AG.  The 'need' parameter
2286  * specifies how much space we're going to need for the AGFL and the
2287  * 'reserved' parameter tells us how many blocks in this AG are reserved for
2288  * other callers.
2289  */
2290 xfs_extlen_t
2291 xfs_alloc_longest_free_extent(
2292 	struct xfs_perag	*pag,
2293 	xfs_extlen_t		need,
2294 	xfs_extlen_t		reserved)
2295 {
2296 	xfs_extlen_t		delta = 0;
2297 
2298 	/*
2299 	 * If the AGFL needs a recharge, we'll have to subtract that from the
2300 	 * longest extent.
2301 	 */
2302 	if (need > pag->pagf_flcount)
2303 		delta = need - pag->pagf_flcount;
2304 
2305 	/*
2306 	 * If we cannot maintain others' reservations with space from the
2307 	 * not-longest freesp extents, we'll have to subtract /that/ from
2308 	 * the longest extent too.
2309 	 */
2310 	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2311 		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2312 
2313 	/*
2314 	 * If the longest extent is long enough to satisfy all the
2315 	 * reservations and AGFL rules in place, we can return this extent.
2316 	 */
2317 	if (pag->pagf_longest > delta)
2318 		return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2319 				pag->pagf_longest - delta);
2320 
2321 	/* Otherwise, let the caller try for 1 block if there's space. */
2322 	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2323 }
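
/*
 * Standalone model of the computation above, with a worked example:
 * longest = 100, freeblks = 120, flcount = 2, need = 8, reserved = 30
 * gives delta = 6 + 10 = 16, so 84 usable blocks (capped by max_usable).
 * The struct below stands in for the perag fields used above.
 */
#include <stdint.h>

struct pag_model {
	uint32_t freeblks;	/* pagf_freeblks */
	uint32_t longest;	/* pagf_longest */
	uint32_t flcount;	/* pagf_flcount */
	uint32_t max_usable;	/* m_ag_max_usable */
};

static uint32_t longest_free_extent_model(const struct pag_model *p,
					  uint32_t need, uint32_t reserved)
{
	uint32_t delta = 0;

	if (need > p->flcount)
		delta = need - p->flcount;
	if (p->freeblks - p->longest < reserved)
		delta += reserved - (p->freeblks - p->longest);
	if (p->longest > delta)
		return p->longest - delta < p->max_usable ?
				p->longest - delta : p->max_usable;
	/* otherwise let the caller try for a single block */
	return p->flcount > 0 || p->longest > 0;
}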
2324 
2325 /*
2326  * Compute the minimum length of the AGFL in the given AG.  If @pag is NULL,
2327  * return the largest possible minimum length.
2328  */
2329 unsigned int
2330 xfs_alloc_min_freelist(
2331 	struct xfs_mount	*mp,
2332 	struct xfs_perag	*pag)
2333 {
2334 	/* AG btrees have at least 1 level. */
2335 	static const uint8_t	fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
2336 	const uint8_t		*levels = pag ? pag->pagf_levels : fake_levels;
2337 	unsigned int		min_free;
2338 
2339 	ASSERT(mp->m_alloc_maxlevels > 0);
2340 
2341 	/*
2342 	 * For a btree shorter than the maximum height, the worst case is that
2343 	 * every level gets split and a new level is added, then while inserting
2344 	 * another entry to refill the AGFL, every level under the old root gets
2345 	 * split again. This is:
2346 	 *
2347 	 *   (full height split reservation) + (AGFL refill split height)
2348 	 * = (current height + 1) + (current height - 1)
2349 	 * = (new height) + (new height - 2)
2350 	 * = 2 * new height - 2
2351 	 *
2352 	 * For a btree of maximum height, the worst case is that every level
2353 	 * under the root gets split, then while inserting another entry to
2354 	 * refill the AGFL, every level under the root gets split again. This is
2355 	 * also:
2356 	 *
2357 	 *   2 * (current height - 1)
2358 	 * = 2 * (new height - 1)
2359 	 * = 2 * new height - 2
2360 	 */
2361 
2362 	/* space needed by-bno freespace btree */
2363 	min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
2364 				       mp->m_alloc_maxlevels) * 2 - 2;
2365 	/* space needed by-size freespace btree */
2366 	min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
2367 				       mp->m_alloc_maxlevels) * 2 - 2;
2368 	/* space needed reverse mapping used space btree */
2369 	if (xfs_has_rmapbt(mp))
2370 		min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
2371 						mp->m_rmap_maxlevels) * 2 - 2;
2372 
2373 	return min_free;
2374 }
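
/*
 * A worked instance of the "2 * new height - 2" reservation derived in
 * the comment above, using hypothetical btree levels: a 2-level bnobt
 * and a 2-level cntbt with maxlevels = 5 each reserve
 * min(2 + 1, 5) * 2 - 2 = 4 blocks, for a min_free of 8.
 */
#include <assert.h>

static unsigned int worst_case_splits(unsigned int level,
				      unsigned int maxlevels)
{
	unsigned int new_height = level + 1 < maxlevels ?
					level + 1 : maxlevels;

	return 2 * new_height - 2;
}

int main(void)
{
	assert(worst_case_splits(2, 5) == 4);	/* per free space btree */
	assert(worst_case_splits(5, 5) == 8);	/* already at max height */
	return 0;
}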
2375 
2376 /*
2377  * Check if the operation we are fixing up the freelist for should go ahead or
2378  * not. If we are freeing blocks, we always allow it, otherwise the allocation
2379  * is dependent on whether the size and shape of free space available will
2380  * permit the requested allocation to take place.
2381  */
2382 static bool
2383 xfs_alloc_space_available(
2384 	struct xfs_alloc_arg	*args,
2385 	xfs_extlen_t		min_free,
2386 	int			flags)
2387 {
2388 	struct xfs_perag	*pag = args->pag;
2389 	xfs_extlen_t		alloc_len, longest;
2390 	xfs_extlen_t		reservation; /* blocks that are still reserved */
2391 	int			available;
2392 	xfs_extlen_t		agflcount;
2393 
2394 	if (flags & XFS_ALLOC_FLAG_FREEING)
2395 		return true;
2396 
2397 	reservation = xfs_ag_resv_needed(pag, args->resv);
2398 
2399 	/* do we have enough contiguous free space for the allocation? */
2400 	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2401 	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2402 	if (longest < alloc_len)
2403 		return false;
2404 
2405 	/*
2406 	 * Do we have enough free space remaining for the allocation? Don't
2407 	 * account extra agfl blocks because we are about to defer freeing them,
2408 	 * making them unavailable until the current transaction commits.
2409 	 */
2410 	agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2411 	available = (int)(pag->pagf_freeblks + agflcount -
2412 			  reservation - min_free - args->minleft);
2413 	if (available < (int)max(args->total, alloc_len))
2414 		return false;
2415 
2416 	/*
2417 	 * Clamp maxlen to the amount of free space available for the actual
2418 	 * extent allocation.
2419 	 */
2420 	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2421 		args->maxlen = available;
2422 		ASSERT(args->maxlen > 0);
2423 		ASSERT(args->maxlen >= args->minlen);
2424 	}
2425 
2426 	return true;
2427 }
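
/*
 * The remaining-space test above is plain signed arithmetic over the
 * perag counters.  A minimal sketch, where 'want' stands in for
 * max(args->total, alloc_len):
 */
#include <stdbool.h>

static bool space_available_sketch(int freeblks, int flcount, int min_free,
				   int reservation, int minleft, int want)
{
	int agflcount = flcount < min_free ? flcount : min_free;
	int available = freeblks + agflcount - reservation - min_free -
			minleft;

	return available >= want;
}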
2428 
2429 int
2430 xfs_free_agfl_block(
2431 	struct xfs_trans	*tp,
2432 	xfs_agnumber_t		agno,
2433 	xfs_agblock_t		agbno,
2434 	struct xfs_buf		*agbp,
2435 	struct xfs_owner_info	*oinfo)
2436 {
2437 	int			error;
2438 	struct xfs_buf		*bp;
2439 
2440 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2441 				   XFS_AG_RESV_AGFL);
2442 	if (error)
2443 		return error;
2444 
2445 	error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
2446 			XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
2447 			tp->t_mountp->m_bsize, 0, &bp);
2448 	if (error)
2449 		return error;
2450 	xfs_trans_binval(tp, bp);
2451 
2452 	return 0;
2453 }
2454 
2455 /*
2456  * Check the agfl fields of the agf for inconsistency or corruption.
2457  *
2458  * The original purpose was to detect an agfl header padding mismatch between
2459  * current and early v5 kernels. This problem manifests as a 1-slot size
2460  * difference between the on-disk flcount and the active [first, last] range of
2461  * a wrapped agfl.
2462  *
2463  * However, we need to use these same checks to catch agfl count corruptions
2464  * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
2465  * way, we need to reset the agfl and warn the user.
2466  *
2467  * Return true if a reset is required before the agfl can be used, false
2468  * otherwise.
2469  */
2470 static bool
2471 xfs_agfl_needs_reset(
2472 	struct xfs_mount	*mp,
2473 	struct xfs_agf		*agf)
2474 {
2475 	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
2476 	uint32_t		l = be32_to_cpu(agf->agf_fllast);
2477 	uint32_t		c = be32_to_cpu(agf->agf_flcount);
2478 	int			agfl_size = xfs_agfl_size(mp);
2479 	int			active;
2480 
2481 	/*
2482 	 * The agf read verifier catches severe corruption of these fields.
2483 	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2484 	 * the verifier allows it.
2485 	 */
2486 	if (f >= agfl_size || l >= agfl_size)
2487 		return true;
2488 	if (c > agfl_size)
2489 		return true;
2490 
2491 	/*
2492 	 * Check consistency between the on-disk count and the active range. An
2493 	 * agfl padding mismatch manifests as an inconsistent flcount.
2494 	 */
2495 	if (c && l >= f)
2496 		active = l - f + 1;
2497 	else if (c)
2498 		active = agfl_size - f + l + 1;
2499 	else
2500 		active = 0;
2501 
2502 	return active != c;
2503 }
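
/*
 * The active-range computation handles AGFL wrap-around.  A userspace
 * model with a couple of sanity checks (slot count invented for the
 * example):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t agfl_active(uint32_t first, uint32_t last, uint32_t count,
			    uint32_t agfl_size)
{
	if (!count)
		return 0;
	if (last >= first)
		return last - first + 1;
	return agfl_size - first + last + 1;	/* wrapped range */
}

int main(void)
{
	assert(agfl_active(5, 9, 5, 118) == 5);		/* linear range */
	assert(agfl_active(116, 2, 5, 118) == 5);	/* wrapped range */
	/* A padding mismatch shows up as active != flcount -> reset. */
	return 0;
}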
2504 
2505 /*
2506  * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2507  * agfl content cannot be trusted. Warn the user that a repair is required to
2508  * recover leaked blocks.
2509  *
2510  * The purpose of this mechanism is to handle filesystems affected by the agfl
2511  * header padding mismatch problem. A reset keeps the filesystem online with a
2512  * relatively minor free space accounting inconsistency rather than suffer the
2513  * inevitable crash from use of an invalid agfl block.
2514  */
2515 static void
2516 xfs_agfl_reset(
2517 	struct xfs_trans	*tp,
2518 	struct xfs_buf		*agbp,
2519 	struct xfs_perag	*pag)
2520 {
2521 	struct xfs_mount	*mp = tp->t_mountp;
2522 	struct xfs_agf		*agf = agbp->b_addr;
2523 
2524 	ASSERT(xfs_perag_agfl_needs_reset(pag));
2525 	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2526 
2527 	xfs_warn(mp,
2528 	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2529 	       "Please unmount and run xfs_repair.",
2530 	         pag->pag_agno, pag->pagf_flcount);
2531 
2532 	agf->agf_flfirst = 0;
2533 	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2534 	agf->agf_flcount = 0;
2535 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2536 				    XFS_AGF_FLCOUNT);
2537 
2538 	pag->pagf_flcount = 0;
2539 	clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
2540 }
2541 
2542 /*
2543  * Defer an AGFL block free. This is effectively equivalent to
2544  * xfs_free_extent_later() with some special handling particular to AGFL blocks.
2545  *
2546  * Deferring AGFL frees helps prevent log reservation overruns due to too many
2547  * allocation operations in a transaction. AGFL frees are prone to this problem
2548  * because, for one, they are always freed one at a time. Further, an immediate
2549  * AGFL block free can cause a btree join and require another block free before
2550  * the real allocation can proceed. Deferring the free disconnects freeing up
2551  * the AGFL slot from freeing the block.
2552  */
2553 static int
2554 xfs_defer_agfl_block(
2555 	struct xfs_trans		*tp,
2556 	xfs_agnumber_t			agno,
2557 	xfs_agblock_t			agbno,
2558 	struct xfs_owner_info		*oinfo)
2559 {
2560 	struct xfs_mount		*mp = tp->t_mountp;
2561 	struct xfs_extent_free_item	*xefi;
2562 	xfs_fsblock_t			fsbno = XFS_AGB_TO_FSB(mp, agno, agbno);
2563 
2564 	ASSERT(xfs_extfree_item_cache != NULL);
2565 	ASSERT(oinfo != NULL);
2566 
2567 	if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, fsbno)))
2568 		return -EFSCORRUPTED;
2569 
2570 	xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2571 			       GFP_KERNEL | __GFP_NOFAIL);
2572 	xefi->xefi_startblock = fsbno;
2573 	xefi->xefi_blockcount = 1;
2574 	xefi->xefi_owner = oinfo->oi_owner;
2575 	xefi->xefi_agresv = XFS_AG_RESV_AGFL;
2576 
2577 	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2578 
2579 	xfs_extent_free_get_group(mp, xefi);
2580 	xfs_defer_add(tp, &xefi->xefi_list, &xfs_agfl_free_defer_type);
2581 	return 0;
2582 }
2583 
2584 /*
2585  * Add the extent to the list of extents to be freed at transaction end.
2586  * The list is maintained sorted (by block number).
2587  */
2588 static int
2589 xfs_defer_extent_free(
2590 	struct xfs_trans		*tp,
2591 	xfs_fsblock_t			bno,
2592 	xfs_filblks_t			len,
2593 	const struct xfs_owner_info	*oinfo,
2594 	enum xfs_ag_resv_type		type,
2595 	bool				skip_discard,
2596 	struct xfs_defer_pending	**dfpp)
2597 {
2598 	struct xfs_extent_free_item	*xefi;
2599 	struct xfs_mount		*mp = tp->t_mountp;
2600 #ifdef DEBUG
2601 	xfs_agnumber_t			agno;
2602 	xfs_agblock_t			agbno;
2603 
2604 	ASSERT(bno != NULLFSBLOCK);
2605 	ASSERT(len > 0);
2606 	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2607 	ASSERT(!isnullstartblock(bno));
2608 	agno = XFS_FSB_TO_AGNO(mp, bno);
2609 	agbno = XFS_FSB_TO_AGBNO(mp, bno);
2610 	ASSERT(agno < mp->m_sb.sb_agcount);
2611 	ASSERT(agbno < mp->m_sb.sb_agblocks);
2612 	ASSERT(len < mp->m_sb.sb_agblocks);
2613 	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
2614 #endif
2615 	ASSERT(xfs_extfree_item_cache != NULL);
2616 	ASSERT(type != XFS_AG_RESV_AGFL);
2617 
2618 	if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
2619 		return -EFSCORRUPTED;
2620 
2621 	xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2622 			       GFP_KERNEL | __GFP_NOFAIL);
2623 	xefi->xefi_startblock = bno;
2624 	xefi->xefi_blockcount = (xfs_extlen_t)len;
2625 	xefi->xefi_agresv = type;
2626 	if (skip_discard)
2627 		xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2628 	if (oinfo) {
2629 		ASSERT(oinfo->oi_offset == 0);
2630 
2631 		if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2632 			xefi->xefi_flags |= XFS_EFI_ATTR_FORK;
2633 		if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2634 			xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2635 		xefi->xefi_owner = oinfo->oi_owner;
2636 	} else {
2637 		xefi->xefi_owner = XFS_RMAP_OWN_NULL;
2638 	}
2639 	trace_xfs_bmap_free_defer(mp,
2640 			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
2641 			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
2642 
2643 	xfs_extent_free_get_group(mp, xefi);
2644 	*dfpp = xfs_defer_add(tp, &xefi->xefi_list, &xfs_extent_free_defer_type);
2645 	return 0;
2646 }
2647 
2648 int
2649 xfs_free_extent_later(
2650 	struct xfs_trans		*tp,
2651 	xfs_fsblock_t			bno,
2652 	xfs_filblks_t			len,
2653 	const struct xfs_owner_info	*oinfo,
2654 	enum xfs_ag_resv_type		type,
2655 	bool				skip_discard)
2656 {
2657 	struct xfs_defer_pending	*dontcare = NULL;
2658 
2659 	return xfs_defer_extent_free(tp, bno, len, oinfo, type, skip_discard,
2660 			&dontcare);
2661 }
2662 
2663 /*
2664  * Set up automatic freeing of unwritten space in the filesystem.
2665  *
2666  * This function attaches a paused deferred extent free item to the
2667  * transaction.  Pausing means that the EFI will be logged in the next
2668  * transaction commit, but the pending EFI will not be finished until the
2669  * pending item is unpaused.
2670  *
2671  * If the system goes down after the EFI has been persisted to the log but
2672  * before the pending item is unpaused, log recovery will find the EFI, fail to
2673  * find the EFD, and free the space.
2674  *
2675  * If the pending item is unpaused, the next transaction commit will log an EFD
2676  * without freeing the space.
2677  *
2678  * Caller must ensure that the tp, fsbno, len, oinfo, and resv fields of the
2679  * @args structure are set to the relevant values.
2680  */
2681 int
2682 xfs_alloc_schedule_autoreap(
2683 	const struct xfs_alloc_arg	*args,
2684 	bool				skip_discard,
2685 	struct xfs_alloc_autoreap	*aarp)
2686 {
2687 	int				error;
2688 
2689 	error = xfs_defer_extent_free(args->tp, args->fsbno, args->len,
2690 			&args->oinfo, args->resv, skip_discard, &aarp->dfp);
2691 	if (error)
2692 		return error;
2693 
2694 	xfs_defer_item_pause(args->tp, aarp->dfp);
2695 	return 0;
2696 }
2697 
2698 /*
2699  * Cancel automatic freeing of unwritten space in the filesystem.
2700  *
2701  * Earlier, we created a paused deferred extent free item and attached it to
2702  * this transaction so that we could automatically roll back a new space
2703  * allocation if the system went down.  Now we want to cancel the paused work
2704  * item by marking the EFI stale so we don't actually free the space, unpausing
2705  * the pending item and logging an EFD.
2706  *
2707  * The caller generally should have already mapped the space into the ondisk
2708  * filesystem.  If the reserved space was partially used, the caller must call
2709  * xfs_free_extent_later to create a new EFI to free the unused space.
2710  */
2711 void
2712 xfs_alloc_cancel_autoreap(
2713 	struct xfs_trans		*tp,
2714 	struct xfs_alloc_autoreap	*aarp)
2715 {
2716 	struct xfs_defer_pending	*dfp = aarp->dfp;
2717 	struct xfs_extent_free_item	*xefi;
2718 
2719 	if (!dfp)
2720 		return;
2721 
2722 	list_for_each_entry(xefi, &dfp->dfp_work, xefi_list)
2723 		xefi->xefi_flags |= XFS_EFI_CANCELLED;
2724 
2725 	xfs_defer_item_unpause(tp, dfp);
2726 }
2727 
2728 /*
2729  * Commit automatic freeing of unwritten space in the filesystem.
2730  *
2731  * This unpauses an earlier _schedule_autoreap and commits to freeing the
2732  * allocated space.  Call this if none of the reserved space was used.
2733  */
2734 void
2735 xfs_alloc_commit_autoreap(
2736 	struct xfs_trans		*tp,
2737 	struct xfs_alloc_autoreap	*aarp)
2738 {
2739 	if (aarp->dfp)
2740 		xfs_defer_item_unpause(tp, aarp->dfp);
2741 }
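
/*
 * Taken together, the three helpers above form a small protocol.  The
 * sketch below (illustrative only, not called) shows the call ordering;
 * the transaction setup and the decision of whether the reserved space
 * ended up used are assumed to happen elsewhere.
 */
static int
xfs_alloc_autoreap_example(
	struct xfs_alloc_arg		*args,
	bool				space_was_used)
{
	struct xfs_alloc_autoreap	aarp = { };
	int				error;

	/* Log a paused EFI that recovery will act on if we crash. */
	error = xfs_alloc_schedule_autoreap(args, false, &aarp);
	if (error)
		return error;

	/* ... map the new space into the ondisk filesystem ... */

	if (space_was_used)
		/* Stale the EFI so the space is not actually freed. */
		xfs_alloc_cancel_autoreap(args->tp, &aarp);
	else
		/* Unpause the EFI so the deferred free actually runs. */
		xfs_alloc_commit_autoreap(args->tp, &aarp);
	return 0;
}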
2742 
2743 #ifdef DEBUG
2744 /*
2745  * Check if an AGF has a free extent record whose length is equal to
2746  * args->minlen.
2747  */
2748 STATIC int
2749 xfs_exact_minlen_extent_available(
2750 	struct xfs_alloc_arg	*args,
2751 	struct xfs_buf		*agbp,
2752 	int			*stat)
2753 {
2754 	struct xfs_btree_cur	*cnt_cur;
2755 	xfs_agblock_t		fbno;
2756 	xfs_extlen_t		flen;
2757 	int			error = 0;
2758 
2759 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
2760 					args->pag, XFS_BTNUM_CNT);
2761 	error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2762 	if (error)
2763 		goto out;
2764 
2765 	if (*stat == 0) {
2766 		xfs_btree_mark_sick(cnt_cur);
2767 		error = -EFSCORRUPTED;
2768 		goto out;
2769 	}
2770 
2771 	error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2772 	if (error)
2773 		goto out;
2774 
2775 	if (*stat == 1 && flen != args->minlen)
2776 		*stat = 0;
2777 
2778 out:
2779 	xfs_btree_del_cursor(cnt_cur, error);
2780 
2781 	return error;
2782 }
2783 #endif
2784 
2785 /*
2786  * Decide whether to use this allocation group for this allocation.
2787  * If so, fix up the btree freelist's size.
2788  */
2789 int			/* error */
2790 xfs_alloc_fix_freelist(
2791 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
2792 	uint32_t		alloc_flags)
2793 {
2794 	struct xfs_mount	*mp = args->mp;
2795 	struct xfs_perag	*pag = args->pag;
2796 	struct xfs_trans	*tp = args->tp;
2797 	struct xfs_buf		*agbp = NULL;
2798 	struct xfs_buf		*agflbp = NULL;
2799 	struct xfs_alloc_arg	targs;	/* local allocation arguments */
2800 	xfs_agblock_t		bno;	/* freelist block */
2801 	xfs_extlen_t		need;	/* total blocks needed in freelist */
2802 	int			error = 0;
2803 
2804 	/* deferred ops (AGFL block frees) require permanent transactions */
2805 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2806 
2807 	if (!xfs_perag_initialised_agf(pag)) {
2808 		error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2809 		if (error) {
2810 			/* Couldn't lock the AGF so skip this AG. */
2811 			if (error == -EAGAIN)
2812 				error = 0;
2813 			goto out_no_agbp;
2814 		}
2815 	}
2816 
2817 	/*
2818 	 * If this is a metadata-preferred pag and we are allocating user
2819 	 * data, then try somewhere else if we are not being asked to try
2820 	 * harder at this point.
2821 	 */
2822 	if (xfs_perag_prefers_metadata(pag) &&
2823 	    (args->datatype & XFS_ALLOC_USERDATA) &&
2824 	    (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2825 		ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
2826 		goto out_agbp_relse;
2827 	}
2828 
2829 	need = xfs_alloc_min_freelist(mp, pag);
2830 	if (!xfs_alloc_space_available(args, need, alloc_flags |
2831 			XFS_ALLOC_FLAG_CHECK))
2832 		goto out_agbp_relse;
2833 
2834 	/*
2835 	 * Get the a.g. freespace buffer.
2836 	 * Can fail if we're not blocking on locks, and it's held.
2837 	 */
2838 	if (!agbp) {
2839 		error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2840 		if (error) {
2841 			/* Couldn't lock the AGF so skip this AG. */
2842 			if (error == -EAGAIN)
2843 				error = 0;
2844 			goto out_no_agbp;
2845 		}
2846 	}
2847 
2848 	/* reset a padding mismatched agfl before final free space check */
2849 	if (xfs_perag_agfl_needs_reset(pag))
2850 		xfs_agfl_reset(tp, agbp, pag);
2851 
2852 	/* If there isn't enough total space or a long enough single extent, reject it. */
2853 	need = xfs_alloc_min_freelist(mp, pag);
2854 	if (!xfs_alloc_space_available(args, need, alloc_flags))
2855 		goto out_agbp_relse;
2856 
2857 #ifdef DEBUG
2858 	if (args->alloc_minlen_only) {
2859 		int stat;
2860 
2861 		error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2862 		if (error || !stat)
2863 			goto out_agbp_relse;
2864 	}
2865 #endif
2866 	/*
2867 	 * Make the freelist shorter if it's too long.
2868 	 *
2869 	 * Note that from this point onwards, we will always release the agf and
2870 	 * agfl buffers on error. This handles the case where we error out and
2871 	 * the buffers are clean or may not have been joined to the transaction
2872 	 * and hence need to be released manually. If they have been joined to
2873 	 * the transaction, then xfs_trans_brelse() will handle them
2874 	 * appropriately based on the recursion count and dirty state of the
2875 	 * buffer.
2876 	 *
2877 	 * XXX (dgc): When we have lots of free space, does this buy us
2878 	 * anything other than extra overhead when we need to put more blocks
2879 	 * back on the free list? Maybe we should only do this when space is
2880 	 * getting low or the AGFL is more than half full?
2881 	 *
2882 	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2883 	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2884 	 * updating the rmapbt.  Both flags are used in xfs_repair while we're
2885 	 * rebuilding the rmapbt, and neither are used by the kernel.  They're
2886 	 * both required to ensure that rmaps are correctly recorded for the
2887 	 * regenerated AGFL, bnobt, and cntbt.  See repair/phase5.c and
2888 	 * repair/rmap.c in xfsprogs for details.
2889 	 */
2890 	memset(&targs, 0, sizeof(targs));
2891 	/* struct copy below */
2892 	if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
2893 		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2894 	else
2895 		targs.oinfo = XFS_RMAP_OINFO_AG;
2896 	while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
2897 			pag->pagf_flcount > need) {
2898 		error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
2899 		if (error)
2900 			goto out_agbp_relse;
2901 
2902 		/* defer agfl frees */
2903 		error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2904 		if (error)
2905 			goto out_agbp_relse;
2906 	}
2907 
2908 	targs.tp = tp;
2909 	targs.mp = mp;
2910 	targs.agbp = agbp;
2911 	targs.agno = args->agno;
2912 	targs.alignment = targs.minlen = targs.prod = 1;
2913 	targs.pag = pag;
2914 	error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2915 	if (error)
2916 		goto out_agbp_relse;
2917 
2918 	/* Make the freelist longer if it's too short. */
2919 	while (pag->pagf_flcount < need) {
2920 		targs.agbno = 0;
2921 		targs.maxlen = need - pag->pagf_flcount;
2922 		targs.resv = XFS_AG_RESV_AGFL;
2923 
2924 		/* Allocate as many blocks as possible at once. */
2925 		error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
2926 		if (error)
2927 			goto out_agflbp_relse;
2928 
2929 		/*
2930 		 * Stop if we run out.  Won't happen if callers are obeying
2931 		 * the restrictions correctly.  Can happen for free calls
2932 		 * on a completely full ag.
2933 		 */
2934 		if (targs.agbno == NULLAGBLOCK) {
2935 			if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
2936 				break;
2937 			goto out_agflbp_relse;
2938 		}
2939 
2940 		if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) {
2941 			error = xfs_rmap_alloc(tp, agbp, pag,
2942 				       targs.agbno, targs.len, &targs.oinfo);
2943 			if (error)
2944 				goto out_agflbp_relse;
2945 		}
2946 		error = xfs_alloc_update_counters(tp, agbp,
2947 						  -((long)(targs.len)));
2948 		if (error)
2949 			goto out_agflbp_relse;
2950 
2951 		/*
2952 		 * Put each allocated block on the list.
2953 		 */
2954 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2955 			error = xfs_alloc_put_freelist(pag, tp, agbp,
2956 							agflbp, bno, 0);
2957 			if (error)
2958 				goto out_agflbp_relse;
2959 		}
2960 	}
2961 	xfs_trans_brelse(tp, agflbp);
2962 	args->agbp = agbp;
2963 	return 0;
2964 
2965 out_agflbp_relse:
2966 	xfs_trans_brelse(tp, agflbp);
2967 out_agbp_relse:
2968 	if (agbp)
2969 		xfs_trans_brelse(tp, agbp);
2970 out_no_agbp:
2971 	args->agbp = NULL;
2972 	return error;
2973 }
2974 
2975 /*
2976  * Get a block from the freelist.
2977  * Returns with the buffer for the block gotten.
2978  */
2979 int
2980 xfs_alloc_get_freelist(
2981 	struct xfs_perag	*pag,
2982 	struct xfs_trans	*tp,
2983 	struct xfs_buf		*agbp,
2984 	xfs_agblock_t		*bnop,
2985 	int			btreeblk)
2986 {
2987 	struct xfs_agf		*agf = agbp->b_addr;
2988 	struct xfs_buf		*agflbp;
2989 	xfs_agblock_t		bno;
2990 	__be32			*agfl_bno;
2991 	int			error;
2992 	uint32_t		logflags;
2993 	struct xfs_mount	*mp = tp->t_mountp;
2994 
2995 	/*
2996 	 * Freelist is empty, give up.
2997 	 */
2998 	if (!agf->agf_flcount) {
2999 		*bnop = NULLAGBLOCK;
3000 		return 0;
3001 	}
3002 	/*
3003 	 * Read the array of free blocks.
3004 	 */
3005 	error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3006 	if (error)
3007 		return error;
3008 
3009 
3010 	/*
3011 	 * Get the block number and update the data structures.
3012 	 */
3013 	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3014 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
3015 	if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
3016 		return -EFSCORRUPTED;
3017 
3018 	be32_add_cpu(&agf->agf_flfirst, 1);
3019 	xfs_trans_brelse(tp, agflbp);
3020 	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
3021 		agf->agf_flfirst = 0;
3022 
3023 	ASSERT(!xfs_perag_agfl_needs_reset(pag));
3024 	be32_add_cpu(&agf->agf_flcount, -1);
3025 	pag->pagf_flcount--;
3026 
3027 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
3028 	if (btreeblk) {
3029 		be32_add_cpu(&agf->agf_btreeblks, 1);
3030 		pag->pagf_btreeblks++;
3031 		logflags |= XFS_AGF_BTREEBLKS;
3032 	}
3033 
3034 	xfs_alloc_log_agf(tp, agbp, logflags);
3035 	*bnop = bno;
3036 
3037 	return 0;
3038 }
3039 
3040 /*
3041  * Log the given fields from the agf structure.
3042  */
3043 void
3044 xfs_alloc_log_agf(
3045 	struct xfs_trans	*tp,
3046 	struct xfs_buf		*bp,
3047 	uint32_t		fields)
3048 {
3049 	int	first;		/* first byte offset */
3050 	int	last;		/* last byte offset */
3051 	static const short	offsets[] = {
3052 		offsetof(xfs_agf_t, agf_magicnum),
3053 		offsetof(xfs_agf_t, agf_versionnum),
3054 		offsetof(xfs_agf_t, agf_seqno),
3055 		offsetof(xfs_agf_t, agf_length),
3056 		offsetof(xfs_agf_t, agf_roots[0]),
3057 		offsetof(xfs_agf_t, agf_levels[0]),
3058 		offsetof(xfs_agf_t, agf_flfirst),
3059 		offsetof(xfs_agf_t, agf_fllast),
3060 		offsetof(xfs_agf_t, agf_flcount),
3061 		offsetof(xfs_agf_t, agf_freeblks),
3062 		offsetof(xfs_agf_t, agf_longest),
3063 		offsetof(xfs_agf_t, agf_btreeblks),
3064 		offsetof(xfs_agf_t, agf_uuid),
3065 		offsetof(xfs_agf_t, agf_rmap_blocks),
3066 		offsetof(xfs_agf_t, agf_refcount_blocks),
3067 		offsetof(xfs_agf_t, agf_refcount_root),
3068 		offsetof(xfs_agf_t, agf_refcount_level),
3069 		/* needed so that we don't log the whole rest of the structure: */
3070 		offsetof(xfs_agf_t, agf_spare64),
3071 		sizeof(xfs_agf_t)
3072 	};
3073 
3074 	trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
3075 
3076 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
3077 
3078 	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
3079 	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
3080 }
3081 
3082 /*
3083  * Put the block on the freelist for the allocation group.
3084  */
3085 int
3086 xfs_alloc_put_freelist(
3087 	struct xfs_perag	*pag,
3088 	struct xfs_trans	*tp,
3089 	struct xfs_buf		*agbp,
3090 	struct xfs_buf		*agflbp,
3091 	xfs_agblock_t		bno,
3092 	int			btreeblk)
3093 {
3094 	struct xfs_mount	*mp = tp->t_mountp;
3095 	struct xfs_agf		*agf = agbp->b_addr;
3096 	__be32			*blockp;
3097 	int			error;
3098 	uint32_t		logflags;
3099 	__be32			*agfl_bno;
3100 	int			startoff;
3101 
3102 	if (!agflbp) {
3103 		error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3104 		if (error)
3105 			return error;
3106 	}
3107 
3108 	be32_add_cpu(&agf->agf_fllast, 1);
3109 	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
3110 		agf->agf_fllast = 0;
3111 
3112 	ASSERT(!xfs_perag_agfl_needs_reset(pag));
3113 	be32_add_cpu(&agf->agf_flcount, 1);
3114 	pag->pagf_flcount++;
3115 
3116 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
3117 	if (btreeblk) {
3118 		be32_add_cpu(&agf->agf_btreeblks, -1);
3119 		pag->pagf_btreeblks--;
3120 		logflags |= XFS_AGF_BTREEBLKS;
3121 	}
3122 
3123 	xfs_alloc_log_agf(tp, agbp, logflags);
3124 
3125 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
3126 
3127 	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3128 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
3129 	*blockp = cpu_to_be32(bno);
3130 	startoff = (char *)blockp - (char *)agflbp->b_addr;
3131 
3132 	xfs_alloc_log_agf(tp, agbp, logflags);
3133 
3134 	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
3135 	xfs_trans_log_buf(tp, agflbp, startoff,
3136 			  startoff + sizeof(xfs_agblock_t) - 1);
3137 	return 0;
3138 }
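
/*
 * xfs_alloc_get_freelist() and xfs_alloc_put_freelist() treat the AGFL
 * as a ring buffer indexed by flfirst/fllast.  A userspace model of the
 * two operations (slot count invented for the example):
 */
#include <stdint.h>

#define MODEL_AGFL_SIZE	118

struct agfl_model {
	uint32_t slots[MODEL_AGFL_SIZE];
	uint32_t first, last, count;
};

static int agfl_pop(struct agfl_model *a, uint32_t *bno)
{
	if (!a->count)
		return -1;			/* empty, like NULLAGBLOCK */
	*bno = a->slots[a->first];
	if (++a->first == MODEL_AGFL_SIZE)
		a->first = 0;			/* wrap, as agf_flfirst does */
	a->count--;
	return 0;
}

static void agfl_push(struct agfl_model *a, uint32_t bno)
{
	if (++a->last == MODEL_AGFL_SIZE)
		a->last = 0;			/* wrap, as agf_fllast does */
	a->slots[a->last] = bno;
	a->count++;
}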
3139 
3140 /*
3141  * Check that this AGF/AGI header's sequence number and length matches the AG
3142  * number and size in fsblocks.
3143  */
3144 xfs_failaddr_t
3145 xfs_validate_ag_length(
3146 	struct xfs_buf		*bp,
3147 	uint32_t		seqno,
3148 	uint32_t		length)
3149 {
3150 	struct xfs_mount	*mp = bp->b_mount;
3151 	/*
3152 	 * During growfs operations, the perag is not fully initialised,
3153 	 * so we can't use it for any useful checking. growfs ensures we can't
3154 	 * use it by using uncached buffers that don't have the perag attached
3155 	 * so we can detect and avoid this problem.
3156 	 */
3157 	if (bp->b_pag && seqno != bp->b_pag->pag_agno)
3158 		return __this_address;
3159 
3160 	/*
3161 	 * Only the last AG in the filesystem is allowed to be shorter
3162 	 * than the AG size recorded in the superblock.
3163 	 */
3164 	if (length != mp->m_sb.sb_agblocks) {
3165 		/*
3166 		 * During growfs, the new last AG can get here before we
3167 		 * have updated the superblock. Give it a pass on the seqno
3168 		 * check.
3169 		 */
3170 		if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
3171 			return __this_address;
3172 		if (length < XFS_MIN_AG_BLOCKS)
3173 			return __this_address;
3174 		if (length > mp->m_sb.sb_agblocks)
3175 			return __this_address;
3176 	}
3177 
3178 	return NULL;
3179 }
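
/*
 * Distilled form of the rule above, ignoring the growfs special cases:
 * every AG is exactly sb_agblocks long except possibly the last, which
 * may be shorter but never below the minimum.  Parameter names are
 * invented for the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ag_length_valid(uint32_t seqno, uint32_t length,
			    uint32_t sb_agblocks, uint32_t sb_agcount,
			    uint32_t min_ag_blocks)
{
	if (length == sb_agblocks)
		return true;			/* full-size AG */
	if (seqno != sb_agcount - 1)
		return false;			/* only the last AG may differ */
	return length >= min_ag_blocks && length <= sb_agblocks;
}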
3180 
3181 /*
3182  * Verify the AGF is consistent.
3183  *
3184  * We do not verify that the AGFL indexes in the AGF are fully consistent
3185  * here because of issues with variable on-disk structure sizes. Instead, we check
3186  * the agfl indexes for consistency when we initialise the perag from the AGF
3187  * information after a read completes.
3188  *
3189  * If the index is inconsistent, then we mark the perag as needing an AGFL
3190  * reset. The first AGFL update performed then resets the AGFL indexes and
3191  * refills the AGFL with known good free blocks, allowing the filesystem to
3192  * continue operating normally at the cost of a few leaked free space blocks.
3193  */
3194 static xfs_failaddr_t
3195 xfs_agf_verify(
3196 	struct xfs_buf		*bp)
3197 {
3198 	struct xfs_mount	*mp = bp->b_mount;
3199 	struct xfs_agf		*agf = bp->b_addr;
3200 	xfs_failaddr_t		fa;
3201 	uint32_t		agf_seqno = be32_to_cpu(agf->agf_seqno);
3202 	uint32_t		agf_length = be32_to_cpu(agf->agf_length);
3203 
3204 	if (xfs_has_crc(mp)) {
3205 		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
3206 			return __this_address;
3207 		if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
3208 			return __this_address;
3209 	}
3210 
3211 	if (!xfs_verify_magic(bp, agf->agf_magicnum))
3212 		return __this_address;
3213 
3214 	if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
3215 		return __this_address;
3216 
3217 	/*
3218 	 * Both agf_seqno and agf_length need to be validated before anything
3219 	 * else block-number related in the AGF or AGFL can be checked.
3220 	 */
3221 	fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
3222 	if (fa)
3223 		return fa;
3224 
3225 	if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
3226 		return __this_address;
3227 	if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
3228 		return __this_address;
3229 	if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
3230 		return __this_address;
3231 
3232 	if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3233 	    be32_to_cpu(agf->agf_freeblks) > agf_length)
3234 		return __this_address;
3235 
3236 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
3237 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
3238 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
3239 						mp->m_alloc_maxlevels ||
3240 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
3241 						mp->m_alloc_maxlevels)
3242 		return __this_address;
3243 
3244 	if (xfs_has_lazysbcount(mp) &&
3245 	    be32_to_cpu(agf->agf_btreeblks) > agf_length)
3246 		return __this_address;
3247 
3248 	if (xfs_has_rmapbt(mp)) {
3249 		if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
3250 			return __this_address;
3251 
3252 		if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
3253 		    be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
3254 							mp->m_rmap_maxlevels)
3255 			return __this_address;
3256 	}
3257 
3258 	if (xfs_has_reflink(mp)) {
3259 		if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
3260 			return __this_address;
3261 
3262 		if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3263 		    be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
3264 			return __this_address;
3265 	}
3266 
3267 	return NULL;
3268 }
3269 
3270 static void
3271 xfs_agf_read_verify(
3272 	struct xfs_buf	*bp)
3273 {
3274 	struct xfs_mount *mp = bp->b_mount;
3275 	xfs_failaddr_t	fa;
3276 
3277 	if (xfs_has_crc(mp) &&
3278 	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3279 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3280 	else {
3281 		fa = xfs_agf_verify(bp);
3282 		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3283 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3284 	}
3285 }
3286 
3287 static void
3288 xfs_agf_write_verify(
3289 	struct xfs_buf	*bp)
3290 {
3291 	struct xfs_mount	*mp = bp->b_mount;
3292 	struct xfs_buf_log_item	*bip = bp->b_log_item;
3293 	struct xfs_agf		*agf = bp->b_addr;
3294 	xfs_failaddr_t		fa;
3295 
3296 	fa = xfs_agf_verify(bp);
3297 	if (fa) {
3298 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3299 		return;
3300 	}
3301 
3302 	if (!xfs_has_crc(mp))
3303 		return;
3304 
3305 	if (bip)
3306 		agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3307 
3308 	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3309 }
3310 
3311 const struct xfs_buf_ops xfs_agf_buf_ops = {
3312 	.name = "xfs_agf",
3313 	.magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3314 	.verify_read = xfs_agf_read_verify,
3315 	.verify_write = xfs_agf_write_verify,
3316 	.verify_struct = xfs_agf_verify,
3317 };
3318 
3319 /*
3320  * Read in the allocation group header (free/alloc section).
3321  */
3322 int
3323 xfs_read_agf(
3324 	struct xfs_perag	*pag,
3325 	struct xfs_trans	*tp,
3326 	int			flags,
3327 	struct xfs_buf		**agfbpp)
3328 {
3329 	struct xfs_mount	*mp = pag->pag_mount;
3330 	int			error;
3331 
3332 	trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
3333 
3334 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3335 			XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
3336 			XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3337 	if (xfs_metadata_is_sick(error))
3338 		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
3339 	if (error)
3340 		return error;
3341 
3342 	xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3343 	return 0;
3344 }
3345 
3346 /*
3347  * Read in the allocation group header (free/alloc section) and initialise the
3348  * perag structure if necessary. If the caller provides @agfbpp, then return the
3349  * locked buffer to the caller, otherwise free it.
3350  */
3351 int
3352 xfs_alloc_read_agf(
3353 	struct xfs_perag	*pag,
3354 	struct xfs_trans	*tp,
3355 	int			flags,
3356 	struct xfs_buf		**agfbpp)
3357 {
3358 	struct xfs_buf		*agfbp;
3359 	struct xfs_agf		*agf;
3360 	int			error;
3361 	int			allocbt_blks;
3362 
3363 	trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
3364 
3365 	/* We don't support trylock when freeing. */
3366 	ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3367 			(XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3368 	error = xfs_read_agf(pag, tp,
3369 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3370 			&agfbp);
3371 	if (error)
3372 		return error;
3373 
3374 	agf = agfbp->b_addr;
3375 	if (!xfs_perag_initialised_agf(pag)) {
3376 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3377 		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3378 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3379 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3380 		pag->pagf_levels[XFS_BTNUM_BNOi] =
3381 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
3382 		pag->pagf_levels[XFS_BTNUM_CNTi] =
3383 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
3384 		pag->pagf_levels[XFS_BTNUM_RMAPi] =
3385 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
3386 		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3387 		if (xfs_agfl_needs_reset(pag->pag_mount, agf))
3388 			set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3389 		else
3390 			clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3391 
3392 		/*
3393 		 * Update the in-core allocbt counter. Filter out the rmapbt
3394 		 * subset of the btreeblks counter because the rmapbt is managed
3395 		 * by perag reservation. Subtract one for the rmapbt root block
3396 		 * because the rmap counter includes it while the btreeblks
3397 		 * counter only tracks non-root blocks.
3398 		 */
3399 		allocbt_blks = pag->pagf_btreeblks;
3400 		if (xfs_has_rmapbt(pag->pag_mount))
3401 			allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3402 		if (allocbt_blks > 0)
3403 			atomic64_add(allocbt_blks,
3404 					&pag->pag_mount->m_allocbt_blks);
3405 
3406 		set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
3407 	}
3408 #ifdef DEBUG
3409 	else if (!xfs_is_shutdown(pag->pag_mount)) {
3410 		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3411 		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3412 		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3413 		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3414 		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
3415 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
3416 		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
3417 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
3418 	}
3419 #endif
3420 	if (agfbpp)
3421 		*agfbpp = agfbp;
3422 	else
3423 		xfs_trans_brelse(tp, agfbp);
3424 	return 0;
3425 }
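
/*
 * The allocbt accounting above in isolation: agf_btreeblks counts the
 * non-root blocks of all AGF btrees, while agf_rmap_blocks includes the
 * rmapbt root, hence the "- 1" adjustment.  Illustrative only.
 */
#include <stdbool.h>

static long allocbt_blocks_sketch(long pagf_btreeblks, long agf_rmap_blocks,
				  bool has_rmapbt)
{
	long allocbt_blks = pagf_btreeblks;

	if (has_rmapbt)
		allocbt_blks -= agf_rmap_blocks - 1;

	/* Only positive counts are added to m_allocbt_blks. */
	return allocbt_blks > 0 ? allocbt_blks : 0;
}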
3426 
3427 /*
3428  * Pre-process allocation arguments to set initial state that we don't require
3429  * callers to set up correctly, as well as bounds check the allocation args
3430  * that are set up.
3431  */
3432 static int
3433 xfs_alloc_vextent_check_args(
3434 	struct xfs_alloc_arg	*args,
3435 	xfs_fsblock_t		target,
3436 	xfs_agnumber_t		*minimum_agno)
3437 {
3438 	struct xfs_mount	*mp = args->mp;
3439 	xfs_agblock_t		agsize;
3440 
3441 	args->fsbno = NULLFSBLOCK;
3442 
3443 	*minimum_agno = 0;
3444 	if (args->tp->t_highest_agno != NULLAGNUMBER)
3445 		*minimum_agno = args->tp->t_highest_agno;
3446 
3447 	/*
3448 	 * Just fix this up, for the case where the last a.g. is shorter
3449 	 * (or there's only one a.g.) and the caller couldn't easily figure
3450 	 * that out (xfs_bmap_alloc).
3451 	 */
3452 	agsize = mp->m_sb.sb_agblocks;
3453 	if (args->maxlen > agsize)
3454 		args->maxlen = agsize;
3455 	if (args->alignment == 0)
3456 		args->alignment = 1;
3457 
3458 	ASSERT(args->minlen > 0);
3459 	ASSERT(args->maxlen > 0);
3460 	ASSERT(args->alignment > 0);
3461 	ASSERT(args->resv != XFS_AG_RESV_AGFL);
3462 
3463 	ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount);
3464 	ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize);
3465 	ASSERT(args->minlen <= args->maxlen);
3466 	ASSERT(args->minlen <= agsize);
3467 	ASSERT(args->mod < args->prod);
3468 
3469 	if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount ||
3470 	    XFS_FSB_TO_AGBNO(mp, target) >= agsize ||
3471 	    args->minlen > args->maxlen || args->minlen > agsize ||
3472 	    args->mod >= args->prod) {
3473 		trace_xfs_alloc_vextent_badargs(args);
3474 		return -ENOSPC;
3475 	}
3476 
3477 	if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
3478 		trace_xfs_alloc_vextent_skip_deadlock(args);
3479 		return -ENOSPC;
3480 	}
3481 	return 0;
3482 
3483 }
3484 
3485 /*
3486  * Prepare an AG for allocation. If the AG is not prepared to accept the
3487  * allocation, return failure.
3488  *
3489  * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
3490  * modified to hold their own perag references.
3491  */
3492 static int
3493 xfs_alloc_vextent_prepare_ag(
3494 	struct xfs_alloc_arg	*args,
3495 	uint32_t		alloc_flags)
3496 {
3497 	bool			need_pag = !args->pag;
3498 	int			error;
3499 
3500 	if (need_pag)
3501 		args->pag = xfs_perag_get(args->mp, args->agno);
3502 
3503 	args->agbp = NULL;
3504 	error = xfs_alloc_fix_freelist(args, alloc_flags);
3505 	if (error) {
3506 		trace_xfs_alloc_vextent_nofix(args);
3507 		if (need_pag)
3508 			xfs_perag_put(args->pag);
3509 		args->agbno = NULLAGBLOCK;
3510 		return error;
3511 	}
3512 	if (!args->agbp) {
3513 		/* cannot allocate in this AG at all */
3514 		trace_xfs_alloc_vextent_noagbp(args);
3515 		args->agbno = NULLAGBLOCK;
3516 		return 0;
3517 	}
3518 	args->wasfromfl = 0;
3519 	return 0;
3520 }
3521 
3522 /*
3523  * Post-process allocation results to account for the allocation if it succeeded
3524  * and set the allocated block number correctly for the caller.
3525  *
3526  * XXX: we should really be returning ENOSPC for ENOSPC, not
3527  * hiding it behind a "successful" NULLFSBLOCK allocation.
3528  */
3529 static int
3530 xfs_alloc_vextent_finish(
3531 	struct xfs_alloc_arg	*args,
3532 	xfs_agnumber_t		minimum_agno,
3533 	int			alloc_error,
3534 	bool			drop_perag)
3535 {
3536 	struct xfs_mount	*mp = args->mp;
3537 	int			error = 0;
3538 
3539 	/*
3540 	 * We can end up here with a locked AGF. If we failed, the caller is
3541 	 * likely going to try to allocate again with different parameters, and
3542 	 * that can widen the AGs that are searched for free space. If we have
3543 	 * to do BMBT block allocation, we have to do a new allocation.
3544 	 *
3545 	 * Hence leaving this function with the AGF locked opens up potential
3546 	 * ABBA AGF deadlocks because a future allocation attempt in this
3547 	 * transaction may attempt to lock a lower number AGF.
3548 	 *
3549 	 * We can't release the AGF until the transaction is committed, so at
3550 	 * this point we must update the "first allocation" tracker to point at
3551 	 * this AG if the tracker is empty or points to a lower AG. This allows
3552 	 * the next allocation attempt to be modified appropriately to avoid
3553 	 * deadlocks.
3554 	 */
3555 	if (args->agbp &&
3556 	    (args->tp->t_highest_agno == NULLAGNUMBER ||
3557 	     args->agno > minimum_agno))
3558 		args->tp->t_highest_agno = args->agno;
3559 
3560 	/*
3561 	 * If the allocation failed with an error or we had an ENOSPC result,
3562 	 * preserve the returned error whilst also marking the allocation result
3563 	 * as "no extent allocated". This ensures that callers that fail to
3564 	 * capture the error will still treat it as a failed allocation.
3565 	 */
3566 	if (alloc_error || args->agbno == NULLAGBLOCK) {
3567 		args->fsbno = NULLFSBLOCK;
3568 		error = alloc_error;
3569 		goto out_drop_perag;
3570 	}
3571 
3572 	args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3573 
3574 	ASSERT(args->len >= args->minlen);
3575 	ASSERT(args->len <= args->maxlen);
3576 	ASSERT(args->agbno % args->alignment == 0);
3577 	XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);
3578 
3579 	/* if not file data, insert new block into the reverse map btree */
3580 	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
3581 		error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
3582 				       args->agbno, args->len, &args->oinfo);
3583 		if (error)
3584 			goto out_drop_perag;
3585 	}
3586 
3587 	if (!args->wasfromfl) {
3588 		error = xfs_alloc_update_counters(args->tp, args->agbp,
3589 						  -((long)(args->len)));
3590 		if (error)
3591 			goto out_drop_perag;
3592 
3593 		ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
3594 				args->len));
3595 	}
3596 
3597 	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
3598 
3599 	XFS_STATS_INC(mp, xs_allocx);
3600 	XFS_STATS_ADD(mp, xs_allocb, args->len);
3601 
3602 	trace_xfs_alloc_vextent_finish(args);
3603 
3604 out_drop_perag:
3605 	if (drop_perag && args->pag) {
3606 		xfs_perag_rele(args->pag);
3607 		args->pag = NULL;
3608 	}
3609 	return error;
3610 }
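
/*
 * Sketch of the fsbno encoding used by xfs_alloc_vextent_finish() above
 * (hypothetical helper mirroring the XFS_AGB_TO_FSB() macro): a filesystem
 * block number is the AG number shifted up by sb_agblklog with the
 * AG-relative block number in the low bits.
 */
static inline xfs_fsblock_t
example_agb_to_fsb(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno)
{
	return ((xfs_fsblock_t)agno << mp->m_sb.sb_agblklog) | agbno;
}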
3611 
3612 /*
3613  * Allocate within a single AG only. This uses a best-fit length algorithm, so
3614  * if you need an exact-sized allocation without locality constraints, this is
3615  * the fastest way to do it.
3616  *
3617  * Caller is expected to hold a perag reference in args->pag.
3618  */
3619 int
3620 xfs_alloc_vextent_this_ag(
3621 	struct xfs_alloc_arg	*args,
3622 	xfs_agnumber_t		agno)
3623 {
3624 	struct xfs_mount	*mp = args->mp;
3625 	xfs_agnumber_t		minimum_agno;
3626 	uint32_t		alloc_flags = 0;
3627 	int			error;
3628 
3629 	ASSERT(args->pag != NULL);
3630 	ASSERT(args->pag->pag_agno == agno);
3631 
3632 	args->agno = agno;
3633 	args->agbno = 0;
3634 
3635 	trace_xfs_alloc_vextent_this_ag(args);
3636 
3637 	error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
3638 			&minimum_agno);
3639 	if (error) {
3640 		if (error == -ENOSPC)
3641 			return 0;
3642 		return error;
3643 	}
3644 
3645 	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3646 	if (!error && args->agbp)
3647 		error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3648 
3649 	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3650 }
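
/*
 * Usage sketch (hypothetical caller): a fixed-size allocation anywhere in a
 * single AG, assuming args was initialised as in the earlier sketch and the
 * caller holds a reference to the perag.  Note that ENOSPC is reported as a
 * NULLFSBLOCK "success", per the XXX above.
 */
static inline int
example_alloc_this_ag(
	struct xfs_alloc_arg	*args,
	struct xfs_perag	*pag)
{
	int			error;

	args->pag = pag;	/* reference remains owned by the caller */
	error = xfs_alloc_vextent_this_ag(args, pag->pag_agno);
	if (error)
		return error;
	if (args->fsbno == NULLFSBLOCK)
		return -ENOSPC;
	return 0;
}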
3651 
3652 /*
3653  * Iterate all AGs trying to allocate an extent starting from @start_ag.
3654  *
3655  * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
3656  * allocation attempts in @start_agno have locality information. If we fail to
3657  * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
3658  * we attempt to allocate in, as there is no locality optimisation possible for
3659  * those allocations.
3660  *
3661  * On return, args->pag may be left referenced if we finish before the "all
3662  * failed" return point. The allocation finish still needs the perag, and
3663  * so the caller will release it once they've finished the allocation.
3664  *
3665  * When we wrap the AG iteration at the end of the filesystem, we have to be
3666  * careful not to wrap into AGs below ones we already have locked in the
3667  * transaction if we are doing a blocking iteration. This will result in an
3668  * out-of-order locking of AGFs and hence can cause deadlocks.
3669  */
3670 static int
3671 xfs_alloc_vextent_iterate_ags(
3672 	struct xfs_alloc_arg	*args,
3673 	xfs_agnumber_t		minimum_agno,
3674 	xfs_agnumber_t		start_agno,
3675 	xfs_agblock_t		target_agbno,
3676 	uint32_t		alloc_flags)
3677 {
3678 	struct xfs_mount	*mp = args->mp;
3679 	xfs_agnumber_t		restart_agno = minimum_agno;
3680 	xfs_agnumber_t		agno;
3681 	int			error = 0;
3682 
3683 	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
3684 		restart_agno = 0;
3685 restart:
3686 	for_each_perag_wrap_range(mp, start_agno, restart_agno,
3687 			mp->m_sb.sb_agcount, agno, args->pag) {
3688 		args->agno = agno;
3689 		error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3690 		if (error)
3691 			break;
3692 		if (!args->agbp) {
3693 			trace_xfs_alloc_vextent_loopfailed(args);
3694 			continue;
3695 		}
3696 
3697 		/*
3698 		 * Allocation is supposed to succeed now, so break out of the
3699 		 * loop regardless of whether we succeed or not.
3700 		 */
3701 		if (args->agno == start_agno && target_agbno) {
3702 			args->agbno = target_agbno;
3703 			error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3704 		} else {
3705 			args->agbno = 0;
3706 			error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3707 		}
3708 		break;
3709 	}
3710 	if (error) {
3711 		xfs_perag_rele(args->pag);
3712 		args->pag = NULL;
3713 		return error;
3714 	}
3715 	if (args->agbp)
3716 		return 0;
3717 
3718 	/*
3719 	 * We didn't find an AG we can allocate from. If we were given
3720 	 * constraining flags by the caller, drop them and retry the allocation
3721 	 * without any constraints being set.
3722 	 */
3723 	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
3724 		alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
3725 		restart_agno = minimum_agno;
3726 		goto restart;
3727 	}
3728 
3729 	ASSERT(args->pag == NULL);
3730 	trace_xfs_alloc_vextent_allfailed(args);
3731 	return 0;
3732 }
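
/*
 * The wrapped iteration above visits start_agno, start_agno + 1, ...,
 * sb_agcount - 1 and then wraps around to restart_agno, ..., start_agno - 1.
 * A plain-integer sketch of that visit order (hypothetical helper, ignoring
 * the perag references the real iterator manages):
 */
static inline xfs_agnumber_t
example_next_wrapped_agno(
	xfs_agnumber_t		agno,		/* AG just visited */
	xfs_agnumber_t		restart_agno,	/* lowest AG we may wrap to */
	xfs_agnumber_t		agcount)	/* one past the highest AG */
{
	if (++agno >= agcount)
		agno = restart_agno;
	return agno;
}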
3733 
3734 /*
3735  * Iterate the AGs from the start AG to the end of the filesystem, trying
3736  * to allocate blocks. It starts with a near allocation attempt in the initial
3737  * AG, then falls back to anywhere-in-ag after the first AG fails. It will wrap
3738  * back to zero if allowed by previous allocations in this transaction,
3739  * otherwise will wrap back to the start AG and run a second blocking pass to
3740  * the end of the filesystem.
3741  */
3742 int
3743 xfs_alloc_vextent_start_ag(
3744 	struct xfs_alloc_arg	*args,
3745 	xfs_fsblock_t		target)
3746 {
3747 	struct xfs_mount	*mp = args->mp;
3748 	xfs_agnumber_t		minimum_agno;
3749 	xfs_agnumber_t		start_agno;
3750 	xfs_agnumber_t		rotorstep = xfs_rotorstep;
3751 	bool			bump_rotor = false;
3752 	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3753 	int			error;
3754 
3755 	ASSERT(args->pag == NULL);
3756 
3757 	args->agno = NULLAGNUMBER;
3758 	args->agbno = NULLAGBLOCK;
3759 
3760 	trace_xfs_alloc_vextent_start_ag(args);
3761 
3762 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3763 	if (error) {
3764 		if (error == -ENOSPC)
3765 			return 0;
3766 		return error;
3767 	}
3768 
3769 	if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3770 	    xfs_is_inode32(mp)) {
3771 		target = XFS_AGB_TO_FSB(mp,
3772 				((mp->m_agfrotor / rotorstep) %
3773 				mp->m_sb.sb_agcount), 0);
3774 		bump_rotor = 1;
3775 	}
3776 
3777 	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3778 	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3779 			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3780 
3781 	if (bump_rotor) {
3782 		if (args->agno == start_agno)
3783 			mp->m_agfrotor = (mp->m_agfrotor + 1) %
3784 				(mp->m_sb.sb_agcount * rotorstep);
3785 		else
3786 			mp->m_agfrotor = (args->agno * rotorstep + 1) %
3787 				(mp->m_sb.sb_agcount * rotorstep);
3788 	}
3789 
3790 	return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3791 }
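
/*
 * Rotor sketch (hypothetical helper mirroring the computation above): for
 * inode32 initial user data the starting AG cycles through the filesystem,
 * advancing once every rotorstep allocations.  With rotorstep = 1 and
 * sb_agcount = 4 the successive start AGs are 0, 1, 2, 3, 0, ...
 */
static inline xfs_agnumber_t
example_rotor_start_agno(
	unsigned int		agfrotor,	/* mp->m_agfrotor */
	unsigned int		rotorstep,	/* xfs_rotorstep */
	xfs_agnumber_t		agcount)	/* mp->m_sb.sb_agcount */
{
	return (agfrotor / rotorstep) % agcount;
}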
3792 
3793 /*
3794  * Iterate from the agno indicated via @target through to the end of the
3795  * filesystem attempting blocking allocation. This does not wrap or try a second
3796  * pass, so it will not recurse into AGs lower than indicated by the target.
3797  */
3798 int
3799 xfs_alloc_vextent_first_ag(
3800 	struct xfs_alloc_arg	*args,
3801 	xfs_fsblock_t		target)
3802 {
3803 	struct xfs_mount	*mp = args->mp;
3804 	xfs_agnumber_t		minimum_agno;
3805 	xfs_agnumber_t		start_agno;
3806 	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3807 	int			error;
3808 
3809 	ASSERT(args->pag == NULL);
3810 
3811 	args->agno = NULLAGNUMBER;
3812 	args->agbno = NULLAGBLOCK;
3813 
3814 	trace_xfs_alloc_vextent_first_ag(args);
3815 
3816 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3817 	if (error) {
3818 		if (error == -ENOSPC)
3819 			return 0;
3820 		return error;
3821 	}
3822 
3823 	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3824 	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3825 			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3826 	return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3827 }
3828 
3829 /*
3830  * Allocate at the exact block target or fail. Caller is expected to hold a
3831  * perag reference in args->pag.
3832  */
3833 int
3834 xfs_alloc_vextent_exact_bno(
3835 	struct xfs_alloc_arg	*args,
3836 	xfs_fsblock_t		target)
3837 {
3838 	struct xfs_mount	*mp = args->mp;
3839 	xfs_agnumber_t		minimum_agno;
3840 	int			error;
3841 
3842 	ASSERT(args->pag != NULL);
3843 	ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3844 
3845 	args->agno = XFS_FSB_TO_AGNO(mp, target);
3846 	args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3847 
3848 	trace_xfs_alloc_vextent_exact_bno(args);
3849 
3850 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3851 	if (error) {
3852 		if (error == -ENOSPC)
3853 			return 0;
3854 		return error;
3855 	}
3856 
3857 	error = xfs_alloc_vextent_prepare_ag(args, 0);
3858 	if (!error && args->agbp)
3859 		error = xfs_alloc_ag_vextent_exact(args);
3860 
3861 	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3862 }
3863 
3864 /*
3865  * Allocate an extent as close to the target as possible. If there are no
3866  * viable candidates in the AG, then fail the allocation.
3867  *
3868  * Caller may or may not have a per-ag reference in args->pag.
3869  */
3870 int
3871 xfs_alloc_vextent_near_bno(
3872 	struct xfs_alloc_arg	*args,
3873 	xfs_fsblock_t		target)
3874 {
3875 	struct xfs_mount	*mp = args->mp;
3876 	xfs_agnumber_t		minimum_agno;
3877 	bool			needs_perag = args->pag == NULL;
3878 	uint32_t		alloc_flags = 0;
3879 	int			error;
3880 
3881 	if (!needs_perag)
3882 		ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3883 
3884 	args->agno = XFS_FSB_TO_AGNO(mp, target);
3885 	args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3886 
3887 	trace_xfs_alloc_vextent_near_bno(args);
3888 
3889 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3890 	if (error) {
3891 		if (error == -ENOSPC)
3892 			return 0;
3893 		return error;
3894 	}
3895 
3896 	if (needs_perag)
3897 		args->pag = xfs_perag_grab(mp, args->agno);
3898 
3899 	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3900 	if (!error && args->agbp)
3901 		error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3902 
3903 	return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
3904 }
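
/*
 * Usage sketch (hypothetical caller): locality allocation next to an
 * already-allocated block, letting the function grab and release the perag
 * reference itself by passing in a NULL args->pag.
 */
static inline int
example_alloc_near(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		nearby)
{
	args->pag = NULL;	/* near_bno takes its own reference */
	return xfs_alloc_vextent_near_bno(args, nearby);
}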
3905 
3906 /* Ensure that the freelist is at full capacity. */
3907 int
3908 xfs_free_extent_fix_freelist(
3909 	struct xfs_trans	*tp,
3910 	struct xfs_perag	*pag,
3911 	struct xfs_buf		**agbp)
3912 {
3913 	struct xfs_alloc_arg	args;
3914 	int			error;
3915 
3916 	memset(&args, 0, sizeof(struct xfs_alloc_arg));
3917 	args.tp = tp;
3918 	args.mp = tp->t_mountp;
3919 	args.agno = pag->pag_agno;
3920 	args.pag = pag;
3921 
3922 	/*
3923 	 * Validate that the block number is legal - this enables us to detect
3924 	 * and handle silent filesystem corruption rather than crashing.
3925 	 */
3926 	if (args.agno >= args.mp->m_sb.sb_agcount)
3927 		return -EFSCORRUPTED;
3928 
3929 	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3930 	if (error)
3931 		return error;
3932 
3933 	*agbp = args.agbp;
3934 	return 0;
3935 }
3936 
3937 /*
3938  * Free an extent.
3939  * Just break up the extent address and hand off to xfs_free_ag_extent
3940  * after fixing up the freelist.
3941  */
3942 int
3943 __xfs_free_extent(
3944 	struct xfs_trans		*tp,
3945 	struct xfs_perag		*pag,
3946 	xfs_agblock_t			agbno,
3947 	xfs_extlen_t			len,
3948 	const struct xfs_owner_info	*oinfo,
3949 	enum xfs_ag_resv_type		type,
3950 	bool				skip_discard)
3951 {
3952 	struct xfs_mount		*mp = tp->t_mountp;
3953 	struct xfs_buf			*agbp;
3954 	struct xfs_agf			*agf;
3955 	int				error;
3956 	unsigned int			busy_flags = 0;
3957 
3958 	ASSERT(len != 0);
3959 	ASSERT(type != XFS_AG_RESV_AGFL);
3960 
3961 	if (XFS_TEST_ERROR(false, mp,
3962 			XFS_ERRTAG_FREE_EXTENT))
3963 		return -EIO;
3964 
3965 	error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3966 	if (error) {
3967 		if (xfs_metadata_is_sick(error))
3968 			xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
3969 		return error;
3970 	}
3971 
3972 	agf = agbp->b_addr;
3973 
3974 	if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3975 		xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
3976 		error = -EFSCORRUPTED;
3977 		goto err_release;
3978 	}
3979 
3980 	/* validate the extent size is legal now that we have the agf locked */
3981 	if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
3982 		xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
3983 		error = -EFSCORRUPTED;
3984 		goto err_release;
3985 	}
3986 
3987 	error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo,
3988 			type);
3989 	if (error)
3990 		goto err_release;
3991 
3992 	if (skip_discard)
3993 		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3994 	xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
3995 	return 0;
3996 
3997 err_release:
3998 	xfs_trans_brelse(tp, agbp);
3999 	return error;
4000 }
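
/*
 * Usage sketch (hypothetical caller): free an extent addressed by a
 * filesystem block number by splitting it into the perag/agbno form that
 * __xfs_free_extent() expects.
 */
static inline int
example_free_fsb_extent(
	struct xfs_trans	*tp,
	xfs_fsblock_t		fsbno,
	xfs_extlen_t		len,
	const struct xfs_owner_info *oinfo)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_perag	*pag;
	int			error;

	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsbno));
	error = __xfs_free_extent(tp, pag, XFS_FSB_TO_AGBNO(mp, fsbno),
			len, oinfo, XFS_AG_RESV_NONE, false);
	xfs_perag_put(pag);
	return error;
}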
4001 
4002 struct xfs_alloc_query_range_info {
4003 	xfs_alloc_query_range_fn	fn;
4004 	void				*priv;
4005 };
4006 
4007 /* Format btree record and pass to our callback. */
4008 STATIC int
4009 xfs_alloc_query_range_helper(
4010 	struct xfs_btree_cur		*cur,
4011 	const union xfs_btree_rec	*rec,
4012 	void				*priv)
4013 {
4014 	struct xfs_alloc_query_range_info	*query = priv;
4015 	struct xfs_alloc_rec_incore		irec;
4016 	xfs_failaddr_t				fa;
4017 
4018 	xfs_alloc_btrec_to_irec(rec, &irec);
4019 	fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
4020 	if (fa)
4021 		return xfs_alloc_complain_bad_rec(cur, fa, &irec);
4022 
4023 	return query->fn(cur, &irec, query->priv);
4024 }
4025 
4026 /* Find all free space within a given range of blocks. */
4027 int
4028 xfs_alloc_query_range(
4029 	struct xfs_btree_cur			*cur,
4030 	const struct xfs_alloc_rec_incore	*low_rec,
4031 	const struct xfs_alloc_rec_incore	*high_rec,
4032 	xfs_alloc_query_range_fn		fn,
4033 	void					*priv)
4034 {
4035 	union xfs_btree_irec			low_brec = { .a = *low_rec };
4036 	union xfs_btree_irec			high_brec = { .a = *high_rec };
4037 	struct xfs_alloc_query_range_info	query = { .priv = priv, .fn = fn };
4038 
4039 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
4040 	return xfs_btree_query_range(cur, &low_brec, &high_brec,
4041 			xfs_alloc_query_range_helper, &query);
4042 }
4043 
4044 /* Find all free space records. */
4045 int
4046 xfs_alloc_query_all(
4047 	struct xfs_btree_cur			*cur,
4048 	xfs_alloc_query_range_fn		fn,
4049 	void					*priv)
4050 {
4051 	struct xfs_alloc_query_range_info	query;
4052 
4053 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
4054 	query.priv = priv;
4055 	query.fn = fn;
4056 	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
4057 }
4058 
4059 /*
4060  * Scan part of the keyspace of the free space and tell us if the area has no
4061  * records, is fully mapped by records, or is partially filled.
4062  */
4063 int
4064 xfs_alloc_has_records(
4065 	struct xfs_btree_cur	*cur,
4066 	xfs_agblock_t		bno,
4067 	xfs_extlen_t		len,
4068 	enum xbtree_recpacking	*outcome)
4069 {
4070 	union xfs_btree_irec	low;
4071 	union xfs_btree_irec	high;
4072 
4073 	memset(&low, 0, sizeof(low));
4074 	low.a.ar_startblock = bno;
4075 	memset(&high, 0xFF, sizeof(high));
4076 	high.a.ar_startblock = bno + len - 1;
4077 
4078 	return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
4079 }
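
/*
 * Usage sketch (hypothetical helper): a fully-packed keyspace means every
 * block in [bno, bno + len) is covered by a free space record, i.e. the
 * whole range is free.
 */
static inline int
example_range_is_all_free(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*all_free)
{
	enum xbtree_recpacking	outcome;
	int			error;

	error = xfs_alloc_has_records(cur, bno, len, &outcome);
	if (error)
		return error;
	*all_free = (outcome == XBTREE_RECPACKING_FULL);
	return 0;
}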
4080 
4081 /*
4082  * Walk all the blocks in the AGFL.  The @walk_fn can return any negative
4083  * error code or XFS_ITER_*.
4084  */
4085 int
4086 xfs_agfl_walk(
4087 	struct xfs_mount	*mp,
4088 	struct xfs_agf		*agf,
4089 	struct xfs_buf		*agflbp,
4090 	xfs_agfl_walk_fn	walk_fn,
4091 	void			*priv)
4092 {
4093 	__be32			*agfl_bno;
4094 	unsigned int		i;
4095 	int			error;
4096 
4097 	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
4098 	i = be32_to_cpu(agf->agf_flfirst);
4099 
4100 	/* Nothing to walk in an empty AGFL. */
4101 	if (agf->agf_flcount == cpu_to_be32(0))
4102 		return 0;
4103 
4104 	/* Otherwise, walk from first to last, wrapping as needed. */
4105 	for (;;) {
4106 		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
4107 		if (error)
4108 			return error;
4109 		if (i == be32_to_cpu(agf->agf_fllast))
4110 			break;
4111 		if (++i == xfs_agfl_size(mp))
4112 			i = 0;
4113 	}
4114 
4115 	return 0;
4116 }
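
/*
 * Walker sketch (hypothetical): a xfs_agfl_walk_fn that counts the blocks on
 * the AGFL.  Any nonzero return value terminates the walk.
 */
static int
example_count_agfl_fn(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	unsigned int		*nr = priv;

	(*nr)++;
	return 0;
}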
4117 
4118 int __init
4119 xfs_extfree_intent_init_cache(void)
4120 {
4121 	xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
4122 			sizeof(struct xfs_extent_free_item),
4123 			0, 0, NULL);
4124 
4125 	return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
4126 }
4127 
4128 void
4129 xfs_extfree_intent_destroy_cache(void)
4130 {
4131 	kmem_cache_destroy(xfs_extfree_item_cache);
4132 	xfs_extfree_item_cache = NULL;
4133 }
4134