xref: /linux/fs/xfs/libxfs/xfs_alloc.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_mount.h"
14 #include "xfs_defer.h"
15 #include "xfs_btree.h"
16 #include "xfs_rmap.h"
17 #include "xfs_alloc_btree.h"
18 #include "xfs_alloc.h"
19 #include "xfs_extent_busy.h"
20 #include "xfs_errortag.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_trans.h"
24 #include "xfs_buf_item.h"
25 #include "xfs_log.h"
26 #include "xfs_ag.h"
27 #include "xfs_ag_resv.h"
28 #include "xfs_bmap.h"
29 #include "xfs_health.h"
30 #include "xfs_extfree_item.h"
31 
32 struct kmem_cache	*xfs_extfree_item_cache;
33 
34 struct workqueue_struct *xfs_alloc_wq;
35 
36 #define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
37 
38 #define	XFSA_FIXUP_BNO_OK	1
39 #define	XFSA_FIXUP_CNT_OK	2
40 
41 /*
42  * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots at
43  * the beginning of the block for a proper header with the location information
44  * and CRC.
45  */
46 unsigned int
47 xfs_agfl_size(
48 	struct xfs_mount	*mp)
49 {
50 	unsigned int		size = mp->m_sb.sb_sectsize;
51 
52 	if (xfs_has_crc(mp))
53 		size -= sizeof(struct xfs_agfl);
54 
55 	return size / sizeof(xfs_agblock_t);
56 }
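/*
 * Worked example of the sizing math above (illustrative only, assuming
 * 512-byte sectors): on a CRC-enabled (V5) filesystem the packed 36-byte
 * struct xfs_agfl header leaves (512 - 36) / sizeof(xfs_agblock_t) =
 * 476 / 4 = 119 free list slots, while a non-CRC filesystem gets all
 * 512 / 4 = 128 slots.
 */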
57 
58 unsigned int
59 xfs_refc_block(
60 	struct xfs_mount	*mp)
61 {
62 	if (xfs_has_rmapbt(mp))
63 		return XFS_RMAP_BLOCK(mp) + 1;
64 	if (xfs_has_finobt(mp))
65 		return XFS_FIBT_BLOCK(mp) + 1;
66 	return XFS_IBT_BLOCK(mp) + 1;
67 }
68 
69 xfs_extlen_t
70 xfs_prealloc_blocks(
71 	struct xfs_mount	*mp)
72 {
73 	if (xfs_has_reflink(mp))
74 		return xfs_refc_block(mp) + 1;
75 	if (xfs_has_rmapbt(mp))
76 		return XFS_RMAP_BLOCK(mp) + 1;
77 	if (xfs_has_finobt(mp))
78 		return XFS_FIBT_BLOCK(mp) + 1;
79 	return XFS_IBT_BLOCK(mp) + 1;
80 }
81 
82 /*
83  * The number of blocks per AG that we withhold from xfs_dec_fdblocks to
84  * guarantee that we can refill the AGFL prior to allocating space in a nearly
85  * full AG.  Although the space described by the free space btrees, the
86  * blocks used by the freesp btrees themselves, and the blocks owned by the
87  * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk
88  * free space in the AG drop so low that the free space btrees cannot refill an
89  * empty AGFL up to the minimum level.  Rather than grind through empty AGs
90  * until the fs goes down, we subtract this many AG blocks from the incore
91  * fdblocks to ensure user allocation does not overcommit the space the
92  * filesystem needs for the AGFLs.  The rmap btree uses a per-AG reservation to
93  * withhold space from xfs_dec_fdblocks, so we do not account for that here.
94  */
95 #define XFS_ALLOCBT_AGFL_RESERVE	4
96 
97 /*
98  * Compute the number of blocks that we set aside to guarantee the ability to
99  * refill the AGFL and handle a full bmap btree split.
100  *
101  * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
102  * AGF buffer (PV 947395), we place constraints on the relationship among
103  * actual allocations for data blocks, freelist blocks, and potential file data
104  * bmap btree blocks. However, these restrictions may leave a delayed extent
105  * with no actual space: for example, a data block in a certain AG is
106  * allocated, but there is no additional block for the extra bmap btree block
107  * required by a split of the file's bmap btree. This may lead to an infinite
108  * loop when the file gets flushed to disk and all delayed extents need to be
109  * actually allocated. To get around this, we explicitly set aside a few
110  * blocks which will not be reserved in delayed allocation.
111  *
112  * For each AG, we need to reserve enough blocks to replenish a totally empty
113  * AGFL and 4 more to handle a potential split of the file's bmap btree.
114  */
115 unsigned int
116 xfs_alloc_set_aside(
117 	struct xfs_mount	*mp)
118 {
119 	return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
120 }
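/*
 * Worked example (illustrative only): on a filesystem with sb_agcount = 4,
 * xfs_alloc_set_aside() withholds 4 * (XFS_ALLOCBT_AGFL_RESERVE + 4) =
 * 4 * 8 = 32 blocks from the incore free space count, regardless of how
 * full each AG actually is.
 */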
121 
122 /*
123  * When deciding how much space to allocate out of an AG, we limit the
124  * allocation maximum size to the size of the AG. However, we cannot use all the
125  * blocks in the AG - some are permanently used by metadata. These
126  * blocks are generally:
127  *	- the AG superblock, AGF, AGI and AGFL
128  *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
129  *	  the AGI free inode and rmap btree root blocks.
130  *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
131  *	- the rmapbt root block
132  *
133  * The AG headers are sector sized, so the amount of space they take up is
134  * dependent on filesystem geometry. The others are all single blocks.
135  */
136 unsigned int
137 xfs_alloc_ag_max_usable(
138 	struct xfs_mount	*mp)
139 {
140 	unsigned int		blocks;
141 
142 	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
143 	blocks += XFS_ALLOCBT_AGFL_RESERVE;
144 	blocks += 3;			/* AGF, AGI btree root blocks */
145 	if (xfs_has_finobt(mp))
146 		blocks++;		/* finobt root block */
147 	if (xfs_has_rmapbt(mp))
148 		blocks++;		/* rmap root block */
149 	if (xfs_has_reflink(mp))
150 		blocks++;		/* refcount root block */
151 
152 	return mp->m_sb.sb_agblocks - blocks;
153 }
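/*
 * Worked example (illustrative only, assuming 4096-byte blocks, 512-byte
 * sectors, and the finobt, rmapbt and reflink features all enabled): the
 * four sector-sized AG headers round up to a single filesystem block, so
 * blocks = 1 + XFS_ALLOCBT_AGFL_RESERVE (4) + 3 + 1 + 1 + 1 = 11 and the
 * maximum usable space per AG is sb_agblocks - 11.
 */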
154 
155 
156 static int
157 xfs_alloc_lookup(
158 	struct xfs_btree_cur	*cur,
159 	xfs_lookup_t		dir,
160 	xfs_agblock_t		bno,
161 	xfs_extlen_t		len,
162 	int			*stat)
163 {
164 	int			error;
165 
166 	cur->bc_rec.a.ar_startblock = bno;
167 	cur->bc_rec.a.ar_blockcount = len;
168 	error = xfs_btree_lookup(cur, dir, stat);
169 	if (*stat == 1)
170 		cur->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
171 	else
172 		cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
173 	return error;
174 }
175 
176 /*
177  * Lookup the record equal to [bno, len] in the btree given by cur.
178  */
179 static inline int				/* error */
180 xfs_alloc_lookup_eq(
181 	struct xfs_btree_cur	*cur,	/* btree cursor */
182 	xfs_agblock_t		bno,	/* starting block of extent */
183 	xfs_extlen_t		len,	/* length of extent */
184 	int			*stat)	/* success/failure */
185 {
186 	return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
187 }
188 
189 /*
190  * Lookup the first record greater than or equal to [bno, len]
191  * in the btree given by cur.
192  */
193 int				/* error */
194 xfs_alloc_lookup_ge(
195 	struct xfs_btree_cur	*cur,	/* btree cursor */
196 	xfs_agblock_t		bno,	/* starting block of extent */
197 	xfs_extlen_t		len,	/* length of extent */
198 	int			*stat)	/* success/failure */
199 {
200 	return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
201 }
202 
203 /*
204  * Lookup the first record less than or equal to [bno, len]
205  * in the btree given by cur.
206  */
207 int					/* error */
208 xfs_alloc_lookup_le(
209 	struct xfs_btree_cur	*cur,	/* btree cursor */
210 	xfs_agblock_t		bno,	/* starting block of extent */
211 	xfs_extlen_t		len,	/* length of extent */
212 	int			*stat)	/* success/failure */
213 {
214 	return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
215 }
216 
217 static inline bool
218 xfs_alloc_cur_active(
219 	struct xfs_btree_cur	*cur)
220 {
221 	return cur && (cur->bc_flags & XFS_BTREE_ALLOCBT_ACTIVE);
222 }
223 
224 /*
225  * Update the record referred to by cur to the value given
226  * by [bno, len].
227  * This either works (return 0) or gets an EFSCORRUPTED error.
228  */
229 STATIC int				/* error */
230 xfs_alloc_update(
231 	struct xfs_btree_cur	*cur,	/* btree cursor */
232 	xfs_agblock_t		bno,	/* starting block of extent */
233 	xfs_extlen_t		len)	/* length of extent */
234 {
235 	union xfs_btree_rec	rec;
236 
237 	rec.alloc.ar_startblock = cpu_to_be32(bno);
238 	rec.alloc.ar_blockcount = cpu_to_be32(len);
239 	return xfs_btree_update(cur, &rec);
240 }
241 
242 /* Convert the ondisk btree record to its incore representation. */
243 void
244 xfs_alloc_btrec_to_irec(
245 	const union xfs_btree_rec	*rec,
246 	struct xfs_alloc_rec_incore	*irec)
247 {
248 	irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
249 	irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
250 }
251 
252 /* Simple checks for free space records. */
253 xfs_failaddr_t
254 xfs_alloc_check_irec(
255 	struct xfs_perag			*pag,
256 	const struct xfs_alloc_rec_incore	*irec)
257 {
258 	if (irec->ar_blockcount == 0)
259 		return __this_address;
260 
261 	/* check for valid extent range, including overflow */
262 	if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount))
263 		return __this_address;
264 
265 	return NULL;
266 }
267 
268 static inline int
269 xfs_alloc_complain_bad_rec(
270 	struct xfs_btree_cur		*cur,
271 	xfs_failaddr_t			fa,
272 	const struct xfs_alloc_rec_incore *irec)
273 {
274 	struct xfs_mount		*mp = cur->bc_mp;
275 
276 	xfs_warn(mp,
277 		"%sbt record corruption in AG %d detected at %pS!",
278 		cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa);
279 	xfs_warn(mp,
280 		"start block 0x%x block count 0x%x", irec->ar_startblock,
281 		irec->ar_blockcount);
282 	xfs_btree_mark_sick(cur);
283 	return -EFSCORRUPTED;
284 }
285 
286 /*
287  * Get the data from the pointed-to record.
288  */
289 int					/* error */
290 xfs_alloc_get_rec(
291 	struct xfs_btree_cur	*cur,	/* btree cursor */
292 	xfs_agblock_t		*bno,	/* output: starting block of extent */
293 	xfs_extlen_t		*len,	/* output: length of extent */
294 	int			*stat)	/* output: success/failure */
295 {
296 	struct xfs_alloc_rec_incore irec;
297 	union xfs_btree_rec	*rec;
298 	xfs_failaddr_t		fa;
299 	int			error;
300 
301 	error = xfs_btree_get_rec(cur, &rec, stat);
302 	if (error || !(*stat))
303 		return error;
304 
305 	xfs_alloc_btrec_to_irec(rec, &irec);
306 	fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
307 	if (fa)
308 		return xfs_alloc_complain_bad_rec(cur, fa, &irec);
309 
310 	*bno = irec.ar_startblock;
311 	*len = irec.ar_blockcount;
312 	return 0;
313 }
314 
315 /*
316  * Compute aligned version of the found extent.
317  * Takes alignment and min length into account.
318  */
319 STATIC bool
320 xfs_alloc_compute_aligned(
321 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
322 	xfs_agblock_t	foundbno,	/* starting block in found extent */
323 	xfs_extlen_t	foundlen,	/* length in found extent */
324 	xfs_agblock_t	*resbno,	/* result block number */
325 	xfs_extlen_t	*reslen,	/* result length */
326 	unsigned	*busy_gen)
327 {
328 	xfs_agblock_t	bno = foundbno;
329 	xfs_extlen_t	len = foundlen;
330 	xfs_extlen_t	diff;
331 	bool		busy;
332 
333 	/* Trim busy sections out of found extent */
334 	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
335 
336 	/*
337 	 * If we have a largish extent that happens to start before min_agbno,
338 	 * see if we can shift it into range...
339 	 */
340 	if (bno < args->min_agbno && bno + len > args->min_agbno) {
341 		diff = args->min_agbno - bno;
342 		if (len > diff) {
343 			bno += diff;
344 			len -= diff;
345 		}
346 	}
347 
348 	if (args->alignment > 1 && len >= args->minlen) {
349 		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
350 
351 		diff = aligned_bno - bno;
352 
353 		*resbno = aligned_bno;
354 		*reslen = diff >= len ? 0 : len - diff;
355 	} else {
356 		*resbno = bno;
357 		*reslen = len;
358 	}
359 
360 	return busy;
361 }
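/*
 * Worked example of the alignment fixup above (illustrative only): for a
 * found extent [bno = 10, len = 20] with args->alignment = 8,
 * aligned_bno = roundup(10, 8) = 16 and diff = 6, so the aligned result
 * is [16, 14].  Had diff consumed the whole extent (diff >= len), *reslen
 * would be 0 and the caller would reject the extent against minlen.
 */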
362 
363 /*
364  * Compute best start block and diff for "near" allocations.
365  * freelen >= wantlen already checked by caller.
366  */
367 STATIC xfs_extlen_t			/* difference value (absolute) */
368 xfs_alloc_compute_diff(
369 	xfs_agblock_t	wantbno,	/* target starting block */
370 	xfs_extlen_t	wantlen,	/* target length */
371 	xfs_extlen_t	alignment,	/* target alignment */
372 	int		datatype,	/* are we allocating data? */
373 	xfs_agblock_t	freebno,	/* freespace's starting block */
374 	xfs_extlen_t	freelen,	/* freespace's length */
375 	xfs_agblock_t	*newbnop)	/* result: best start block from free */
376 {
377 	xfs_agblock_t	freeend;	/* end of freespace extent */
378 	xfs_agblock_t	newbno1;	/* return block number */
379 	xfs_agblock_t	newbno2;	/* other new block number */
380 	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
381 	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
382 	xfs_agblock_t	wantend;	/* end of target extent */
383 	bool		userdata = datatype & XFS_ALLOC_USERDATA;
384 
385 	ASSERT(freelen >= wantlen);
386 	freeend = freebno + freelen;
387 	wantend = wantbno + wantlen;
388 	/*
389 	 * We want to allocate from the start of a free extent if it is past
390 	 * the desired block or if we are allocating user data and the free
391 	 * extent is before desired block. The second case is there to allow
392 	 * for contiguous allocation from the remaining free space if the file
393 	 * grows in the short term.
394 	 */
395 	if (freebno >= wantbno || (userdata && freeend < wantend)) {
396 		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
397 			newbno1 = NULLAGBLOCK;
398 	} else if (freeend >= wantend && alignment > 1) {
399 		newbno1 = roundup(wantbno, alignment);
400 		newbno2 = newbno1 - alignment;
401 		if (newbno1 >= freeend)
402 			newbno1 = NULLAGBLOCK;
403 		else
404 			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
405 		if (newbno2 < freebno)
406 			newbno2 = NULLAGBLOCK;
407 		else
408 			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
409 		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
410 			if (newlen1 < newlen2 ||
411 			    (newlen1 == newlen2 &&
412 			     XFS_ABSDIFF(newbno1, wantbno) >
413 			     XFS_ABSDIFF(newbno2, wantbno)))
414 				newbno1 = newbno2;
415 		} else if (newbno2 != NULLAGBLOCK)
416 			newbno1 = newbno2;
417 	} else if (freeend >= wantend) {
418 		newbno1 = wantbno;
419 	} else if (alignment > 1) {
420 		newbno1 = roundup(freeend - wantlen, alignment);
421 		if (newbno1 > freeend - wantlen &&
422 		    newbno1 - alignment >= freebno)
423 			newbno1 -= alignment;
424 		else if (newbno1 >= freeend)
425 			newbno1 = NULLAGBLOCK;
426 	} else
427 		newbno1 = freeend - wantlen;
428 	*newbnop = newbno1;
429 	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
430 }
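/*
 * Worked example for the common unaligned case above (illustrative only):
 * a metadata allocation with wantbno = 100, wantlen = 10, alignment = 1
 * against free extent [90, 30].  Here freebno < wantbno and
 * freeend = 120 >= wantend = 110, so we take newbno1 = wantbno = 100 and
 * return a diff of 0 -- a perfect locality match carved out of the middle
 * of the free extent.
 */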
431 
432 /*
433  * Fix up the length, based on mod and prod.
434  * len should be k * prod + mod for some k.
435  * If len is too small it is returned unchanged.
436  * If len hits maxlen it is left alone.
437  */
438 STATIC void
439 xfs_alloc_fix_len(
440 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
441 {
442 	xfs_extlen_t	k;
443 	xfs_extlen_t	rlen;
444 
445 	ASSERT(args->mod < args->prod);
446 	rlen = args->len;
447 	ASSERT(rlen >= args->minlen);
448 	ASSERT(rlen <= args->maxlen);
449 	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
450 	    (args->mod == 0 && rlen < args->prod))
451 		return;
452 	k = rlen % args->prod;
453 	if (k == args->mod)
454 		return;
455 	if (k > args->mod)
456 		rlen = rlen - (k - args->mod);
457 	else
458 		rlen = rlen - args->prod + (args->mod - k);
459 	/* casts to (int) catch length underflows */
460 	if ((int)rlen < (int)args->minlen)
461 		return;
462 	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
463 	ASSERT(rlen % args->prod == args->mod);
464 	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
465 		rlen + args->minleft);
466 	args->len = rlen;
467 }
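/*
 * Worked example of the rounding above (illustrative only): with prod = 4,
 * mod = 1 and a candidate length of 10 (below maxlen), k = 10 % 4 = 2 is
 * greater than mod, so the length is trimmed to 10 - (2 - 1) = 9, which
 * satisfies 9 % 4 == 1 == mod.  If trimming would drop the length below
 * minlen, the candidate length is left unchanged instead.
 */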
468 
469 /*
470  * Determine if the cursor points to the block that contains the right-most
471  * block of records in the by-count btree. This block contains the largest
472  * contiguous free extent in the AG, so if we modify a record in this block we
473  * need to call xfs_alloc_fixup_longest() once the modifications are done to
474  * ensure the agf->agf_longest field is kept up to date with the longest free
475  * extent tracked by the by-count btree.
476  */
477 static bool
478 xfs_alloc_cursor_at_lastrec(
479 	struct xfs_btree_cur	*cnt_cur)
480 {
481 	struct xfs_btree_block	*block;
482 	union xfs_btree_ptr	ptr;
483 	struct xfs_buf		*bp;
484 
485 	block = xfs_btree_get_block(cnt_cur, 0, &bp);
486 
487 	xfs_btree_get_sibling(cnt_cur, block, &ptr, XFS_BB_RIGHTSIB);
488 	return xfs_btree_ptr_is_null(cnt_cur, &ptr);
489 }
490 
491 /*
492  * Find the rightmost record of the cntbt, and return the longest free space
493  * recorded in it. Simply set both the block number and the length to their
494  * maximum values before searching.
495  */
496 static int
497 xfs_cntbt_longest(
498 	struct xfs_btree_cur	*cnt_cur,
499 	xfs_extlen_t		*longest)
500 {
501 	struct xfs_alloc_rec_incore irec;
502 	union xfs_btree_rec	    *rec;
503 	int			    stat = 0;
504 	int			    error;
505 
506 	memset(&cnt_cur->bc_rec, 0xFF, sizeof(cnt_cur->bc_rec));
507 	error = xfs_btree_lookup(cnt_cur, XFS_LOOKUP_LE, &stat);
508 	if (error)
509 		return error;
510 	if (!stat) {
511 		/* totally empty tree */
512 		*longest = 0;
513 		return 0;
514 	}
515 
516 	error = xfs_btree_get_rec(cnt_cur, &rec, &stat);
517 	if (error)
518 		return error;
519 	if (XFS_IS_CORRUPT(cnt_cur->bc_mp, !stat)) {
520 		xfs_btree_mark_sick(cnt_cur);
521 		return -EFSCORRUPTED;
522 	}
523 
524 	xfs_alloc_btrec_to_irec(rec, &irec);
525 	*longest = irec.ar_blockcount;
526 	return 0;
527 }
528 
529 /*
530  * Update the longest contiguous free extent in the AG from the by-count cursor
531  * that is passed to us. This should be done at the end of any allocation or
532  * freeing operation that touches the longest extent in the btree.
533  *
534  * Needing to update the longest extent can be determined by calling
535  * xfs_alloc_cursor_at_lastrec() after the cursor is positioned for record
536  * modification but before the modification begins.
537  */
538 static int
539 xfs_alloc_fixup_longest(
540 	struct xfs_btree_cur	*cnt_cur)
541 {
542 	struct xfs_perag	*pag = cnt_cur->bc_ag.pag;
543 	struct xfs_buf		*bp = cnt_cur->bc_ag.agbp;
544 	struct xfs_agf		*agf = bp->b_addr;
545 	xfs_extlen_t		longest = 0;
546 	int			error;
547 
548 	/* Lookup last rec in order to update AGF. */
549 	error = xfs_cntbt_longest(cnt_cur, &longest);
550 	if (error)
551 		return error;
552 
553 	pag->pagf_longest = longest;
554 	agf->agf_longest = cpu_to_be32(pag->pagf_longest);
555 	xfs_alloc_log_agf(cnt_cur->bc_tp, bp, XFS_AGF_LONGEST);
556 
557 	return 0;
558 }
559 
560 /*
561  * Update the two btrees, logically removing from freespace the extent
562  * starting at rbno, rlen blocks.  The extent is contained within the
563  * actual (current) free extent fbno for flen blocks.
564  * Flags are passed in indicating whether the cursors are set to the
565  * relevant records.
566  */
567 STATIC int				/* error code */
568 xfs_alloc_fixup_trees(
569 	struct xfs_btree_cur *cnt_cur,	/* cursor for by-size btree */
570 	struct xfs_btree_cur *bno_cur,	/* cursor for by-block btree */
571 	xfs_agblock_t	fbno,		/* starting block of free extent */
572 	xfs_extlen_t	flen,		/* length of free extent */
573 	xfs_agblock_t	rbno,		/* starting block of returned extent */
574 	xfs_extlen_t	rlen,		/* length of returned extent */
575 	int		flags)		/* flags, XFSA_FIXUP_... */
576 {
577 	int		error;		/* error code */
578 	int		i;		/* operation results */
579 	xfs_agblock_t	nfbno1;		/* first new free startblock */
580 	xfs_agblock_t	nfbno2;		/* second new free startblock */
581 	xfs_extlen_t	nflen1=0;	/* first new free length */
582 	xfs_extlen_t	nflen2=0;	/* second new free length */
583 	struct xfs_mount *mp;
584 	bool		fixup_longest = false;
585 
586 	mp = cnt_cur->bc_mp;
587 
588 	/*
589 	 * Look up the record in the by-size tree if necessary.
590 	 */
591 	if (flags & XFSA_FIXUP_CNT_OK) {
592 #ifdef DEBUG
593 		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
594 			return error;
595 		if (XFS_IS_CORRUPT(mp,
596 				   i != 1 ||
597 				   nfbno1 != fbno ||
598 				   nflen1 != flen)) {
599 			xfs_btree_mark_sick(cnt_cur);
600 			return -EFSCORRUPTED;
601 		}
602 #endif
603 	} else {
604 		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
605 			return error;
606 		if (XFS_IS_CORRUPT(mp, i != 1)) {
607 			xfs_btree_mark_sick(cnt_cur);
608 			return -EFSCORRUPTED;
609 		}
610 	}
611 	/*
612 	 * Look up the record in the by-block tree if necessary.
613 	 */
614 	if (flags & XFSA_FIXUP_BNO_OK) {
615 #ifdef DEBUG
616 		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
617 			return error;
618 		if (XFS_IS_CORRUPT(mp,
619 				   i != 1 ||
620 				   nfbno1 != fbno ||
621 				   nflen1 != flen)) {
622 			xfs_btree_mark_sick(bno_cur);
623 			return -EFSCORRUPTED;
624 		}
625 #endif
626 	} else {
627 		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
628 			return error;
629 		if (XFS_IS_CORRUPT(mp, i != 1)) {
630 			xfs_btree_mark_sick(bno_cur);
631 			return -EFSCORRUPTED;
632 		}
633 	}
634 
635 #ifdef DEBUG
636 	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
637 		struct xfs_btree_block	*bnoblock;
638 		struct xfs_btree_block	*cntblock;
639 
640 		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
641 		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);
642 
643 		if (XFS_IS_CORRUPT(mp,
644 				   bnoblock->bb_numrecs !=
645 				   cntblock->bb_numrecs)) {
646 			xfs_btree_mark_sick(bno_cur);
647 			return -EFSCORRUPTED;
648 		}
649 	}
650 #endif
651 
652 	/*
653 	 * Deal with all four cases: the allocated record is contained
654 	 * within the freespace record, so we can have new freespace
655 	 * at either (or both) end, or no freespace remaining.
656 	 */
657 	if (rbno == fbno && rlen == flen)
658 		nfbno1 = nfbno2 = NULLAGBLOCK;
659 	else if (rbno == fbno) {
660 		nfbno1 = rbno + rlen;
661 		nflen1 = flen - rlen;
662 		nfbno2 = NULLAGBLOCK;
663 	} else if (rbno + rlen == fbno + flen) {
664 		nfbno1 = fbno;
665 		nflen1 = flen - rlen;
666 		nfbno2 = NULLAGBLOCK;
667 	} else {
668 		nfbno1 = fbno;
669 		nflen1 = rbno - fbno;
670 		nfbno2 = rbno + rlen;
671 		nflen2 = (fbno + flen) - nfbno2;
672 	}
673 
674 	if (xfs_alloc_cursor_at_lastrec(cnt_cur))
675 		fixup_longest = true;
676 
677 	/*
678 	 * Delete the entry from the by-size btree.
679 	 */
680 	if ((error = xfs_btree_delete(cnt_cur, &i)))
681 		return error;
682 	if (XFS_IS_CORRUPT(mp, i != 1)) {
683 		xfs_btree_mark_sick(cnt_cur);
684 		return -EFSCORRUPTED;
685 	}
686 	/*
687 	 * Add new by-size btree entry(s).
688 	 */
689 	if (nfbno1 != NULLAGBLOCK) {
690 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
691 			return error;
692 		if (XFS_IS_CORRUPT(mp, i != 0)) {
693 			xfs_btree_mark_sick(cnt_cur);
694 			return -EFSCORRUPTED;
695 		}
696 		if ((error = xfs_btree_insert(cnt_cur, &i)))
697 			return error;
698 		if (XFS_IS_CORRUPT(mp, i != 1)) {
699 			xfs_btree_mark_sick(cnt_cur);
700 			return -EFSCORRUPTED;
701 		}
702 	}
703 	if (nfbno2 != NULLAGBLOCK) {
704 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
705 			return error;
706 		if (XFS_IS_CORRUPT(mp, i != 0)) {
707 			xfs_btree_mark_sick(cnt_cur);
708 			return -EFSCORRUPTED;
709 		}
710 		if ((error = xfs_btree_insert(cnt_cur, &i)))
711 			return error;
712 		if (XFS_IS_CORRUPT(mp, i != 1)) {
713 			xfs_btree_mark_sick(cnt_cur);
714 			return -EFSCORRUPTED;
715 		}
716 	}
717 	/*
718 	 * Fix up the by-block btree entry(s).
719 	 */
720 	if (nfbno1 == NULLAGBLOCK) {
721 		/*
722 		 * No remaining freespace, just delete the by-block tree entry.
723 		 */
724 		if ((error = xfs_btree_delete(bno_cur, &i)))
725 			return error;
726 		if (XFS_IS_CORRUPT(mp, i != 1)) {
727 			xfs_btree_mark_sick(bno_cur);
728 			return -EFSCORRUPTED;
729 		}
730 	} else {
731 		/*
732 		 * Update the by-block entry to start later|be shorter.
733 		 */
734 		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
735 			return error;
736 	}
737 	if (nfbno2 != NULLAGBLOCK) {
738 		/*
739 		 * 2 resulting free entries, need to add one.
740 		 */
741 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
742 			return error;
743 		if (XFS_IS_CORRUPT(mp, i != 0)) {
744 			xfs_btree_mark_sick(bno_cur);
745 			return -EFSCORRUPTED;
746 		}
747 		if ((error = xfs_btree_insert(bno_cur, &i)))
748 			return error;
749 		if (XFS_IS_CORRUPT(mp, i != 1)) {
750 			xfs_btree_mark_sick(bno_cur);
751 			return -EFSCORRUPTED;
752 		}
753 	}
754 
755 	if (fixup_longest)
756 		return xfs_alloc_fixup_longest(cnt_cur);
757 
758 	return 0;
759 }
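/*
 * Worked example of the four remainder cases above (illustrative only):
 * allocating [rbno = 55, rlen = 10] out of free extent [fbno = 50,
 * flen = 20] hits the middle case and leaves remainders [50, 5] and
 * [65, 5].  The by-size record is deleted and both remainders reinserted,
 * while the by-block record is updated in place to [50, 5] and a second
 * record [65, 5] is inserted.
 */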
760 
761 /*
762  * We do not verify the AGFL contents against AGF-based index counters here,
763  * even though we may have access to the perag that contains shadow copies. We
764  * don't know if the AGF-based counters have been checked, and if they have,
765  * they still may be inconsistent because they haven't yet been reset on the
766  * first allocation after the AGF has been read in.
767  *
768  * This means we can only check that all agfl entries contain valid or null
769  * values because we can't reliably determine the active range to exclude
770  * NULLAGBLOCK as a valid value.
771  *
772  * However, we can't even do that for v4 format filesystems because there are
773  * old versions of mkfs out there that do not initialise the AGFL to known,
774  * verifiable values. Hence we can't tell the difference between an AGFL block
775  * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
776  *
777  * As a result, we can only fully validate AGFL block numbers when we pull them
778  * from the freelist in xfs_alloc_get_freelist().
779  */
780 static xfs_failaddr_t
781 xfs_agfl_verify(
782 	struct xfs_buf	*bp)
783 {
784 	struct xfs_mount *mp = bp->b_mount;
785 	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
786 	__be32		*agfl_bno = xfs_buf_to_agfl_bno(bp);
787 	int		i;
788 
789 	if (!xfs_has_crc(mp))
790 		return NULL;
791 
792 	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
793 		return __this_address;
794 	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
795 		return __this_address;
796 	/*
797 	 * During growfs operations, the perag is not fully initialised, so we
798 	 * can't use it for any useful checking. growfs ensures we can't use it
799 	 * by using uncached buffers that don't have the perag attached, so we
800 	 * can detect and avoid this problem.
801 	 */
802 	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
803 		return __this_address;
804 
805 	for (i = 0; i < xfs_agfl_size(mp); i++) {
806 		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
807 		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
808 			return __this_address;
809 	}
810 
811 	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
812 		return __this_address;
813 	return NULL;
814 }
815 
816 static void
817 xfs_agfl_read_verify(
818 	struct xfs_buf	*bp)
819 {
820 	struct xfs_mount *mp = bp->b_mount;
821 	xfs_failaddr_t	fa;
822 
823 	/*
824 	 * There is no verification of non-crc AGFLs because mkfs does not
825 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
826 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
827 	 * can't verify just those entries are valid.
828 	 */
829 	if (!xfs_has_crc(mp))
830 		return;
831 
832 	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
833 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
834 	else {
835 		fa = xfs_agfl_verify(bp);
836 		if (fa)
837 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
838 	}
839 }
840 
841 static void
842 xfs_agfl_write_verify(
843 	struct xfs_buf	*bp)
844 {
845 	struct xfs_mount	*mp = bp->b_mount;
846 	struct xfs_buf_log_item	*bip = bp->b_log_item;
847 	xfs_failaddr_t		fa;
848 
849 	/* no verification of non-crc AGFLs */
850 	if (!xfs_has_crc(mp))
851 		return;
852 
853 	fa = xfs_agfl_verify(bp);
854 	if (fa) {
855 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
856 		return;
857 	}
858 
859 	if (bip)
860 		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
861 
862 	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
863 }
864 
865 const struct xfs_buf_ops xfs_agfl_buf_ops = {
866 	.name = "xfs_agfl",
867 	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
868 	.verify_read = xfs_agfl_read_verify,
869 	.verify_write = xfs_agfl_write_verify,
870 	.verify_struct = xfs_agfl_verify,
871 };
872 
873 /*
874  * Read in the allocation group free block array.
875  */
876 int
877 xfs_alloc_read_agfl(
878 	struct xfs_perag	*pag,
879 	struct xfs_trans	*tp,
880 	struct xfs_buf		**bpp)
881 {
882 	struct xfs_mount	*mp = pag->pag_mount;
883 	struct xfs_buf		*bp;
884 	int			error;
885 
886 	error = xfs_trans_read_buf(
887 			mp, tp, mp->m_ddev_targp,
888 			XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
889 			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
890 	if (xfs_metadata_is_sick(error))
891 		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
892 	if (error)
893 		return error;
894 	xfs_buf_set_ref(bp, XFS_AGFL_REF);
895 	*bpp = bp;
896 	return 0;
897 }
898 
899 STATIC int
900 xfs_alloc_update_counters(
901 	struct xfs_trans	*tp,
902 	struct xfs_buf		*agbp,
903 	long			len)
904 {
905 	struct xfs_agf		*agf = agbp->b_addr;
906 
907 	agbp->b_pag->pagf_freeblks += len;
908 	be32_add_cpu(&agf->agf_freeblks, len);
909 
910 	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
911 		     be32_to_cpu(agf->agf_length))) {
912 		xfs_buf_mark_corrupt(agbp);
913 		xfs_ag_mark_sick(agbp->b_pag, XFS_SICK_AG_AGF);
914 		return -EFSCORRUPTED;
915 	}
916 
917 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
918 	return 0;
919 }
920 
921 /*
922  * Block allocation algorithm and data structures.
923  */
924 struct xfs_alloc_cur {
925 	struct xfs_btree_cur		*cnt;	/* btree cursors */
926 	struct xfs_btree_cur		*bnolt;
927 	struct xfs_btree_cur		*bnogt;
928 	xfs_extlen_t			cur_len;/* current search length */
929 	xfs_agblock_t			rec_bno;/* extent startblock */
930 	xfs_extlen_t			rec_len;/* extent length */
931 	xfs_agblock_t			bno;	/* alloc bno */
932 	xfs_extlen_t			len;	/* alloc len */
933 	xfs_extlen_t			diff;	/* diff from search bno */
934 	unsigned int			busy_gen;/* busy state */
935 	bool				busy;
936 };
937 
938 /*
939  * Set up cursors, etc. in the extent allocation cursor. This function can be
940  * called multiple times to reset an initialized structure without having to
941  * reallocate cursors.
942  */
943 static int
944 xfs_alloc_cur_setup(
945 	struct xfs_alloc_arg	*args,
946 	struct xfs_alloc_cur	*acur)
947 {
948 	int			error;
949 	int			i;
950 
951 	acur->cur_len = args->maxlen;
952 	acur->rec_bno = 0;
953 	acur->rec_len = 0;
954 	acur->bno = 0;
955 	acur->len = 0;
956 	acur->diff = -1;
957 	acur->busy = false;
958 	acur->busy_gen = 0;
959 
960 	/*
961 	 * Perform an initial cntbt lookup to check for availability of maxlen
962 	 * extents. If this fails, we'll return -ENOSPC to signal the caller to
963 	 * attempt a small allocation.
964 	 */
965 	if (!acur->cnt)
966 		acur->cnt = xfs_cntbt_init_cursor(args->mp, args->tp,
967 					args->agbp, args->pag);
968 	error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
969 	if (error)
970 		return error;
971 
972 	/*
973 	 * Allocate the bnobt left and right search cursors.
974 	 */
975 	if (!acur->bnolt)
976 		acur->bnolt = xfs_bnobt_init_cursor(args->mp, args->tp,
977 					args->agbp, args->pag);
978 	if (!acur->bnogt)
979 		acur->bnogt = xfs_bnobt_init_cursor(args->mp, args->tp,
980 					args->agbp, args->pag);
981 	return i == 1 ? 0 : -ENOSPC;
982 }
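/*
 * Usage note: -ENOSPC from this setup means no maxlen-sized extent exists;
 * the caller falls back to xfs_alloc_ag_vextent_small() rather than
 * failing the allocation outright.  See xfs_alloc_ag_vextent_near() below.
 */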
983 
984 static void
985 xfs_alloc_cur_close(
986 	struct xfs_alloc_cur	*acur,
987 	bool			error)
988 {
989 	int			cur_error = XFS_BTREE_NOERROR;
990 
991 	if (error)
992 		cur_error = XFS_BTREE_ERROR;
993 
994 	if (acur->cnt)
995 		xfs_btree_del_cursor(acur->cnt, cur_error);
996 	if (acur->bnolt)
997 		xfs_btree_del_cursor(acur->bnolt, cur_error);
998 	if (acur->bnogt)
999 		xfs_btree_del_cursor(acur->bnogt, cur_error);
1000 	acur->cnt = acur->bnolt = acur->bnogt = NULL;
1001 }
1002 
1003 /*
1004  * Check an extent for allocation and track the best available candidate in the
1005  * allocation structure. The cursor is deactivated if it has entered an
1006  * out-of-range state based on the allocation arguments. Optionally return
1007  * the extent geometry and allocation status if requested by the caller.
1008  */
1009 static int
1010 xfs_alloc_cur_check(
1011 	struct xfs_alloc_arg	*args,
1012 	struct xfs_alloc_cur	*acur,
1013 	struct xfs_btree_cur	*cur,
1014 	int			*new)
1015 {
1016 	int			error, i;
1017 	xfs_agblock_t		bno, bnoa, bnew;
1018 	xfs_extlen_t		len, lena, diff = -1;
1019 	bool			busy;
1020 	unsigned		busy_gen = 0;
1021 	bool			deactivate = false;
1022 	bool			isbnobt = xfs_btree_is_bno(cur->bc_ops);
1023 
1024 	*new = 0;
1025 
1026 	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1027 	if (error)
1028 		return error;
1029 	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1030 		xfs_btree_mark_sick(cur);
1031 		return -EFSCORRUPTED;
1032 	}
1033 
1034 	/*
1035 	 * Check minlen and deactivate a cntbt cursor if out of acceptable size
1036 	 * range (i.e., walking backwards looking for a minlen extent).
1037 	 */
1038 	if (len < args->minlen) {
1039 		deactivate = !isbnobt;
1040 		goto out;
1041 	}
1042 
1043 	busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
1044 					 &busy_gen);
1045 	acur->busy |= busy;
1046 	if (busy)
1047 		acur->busy_gen = busy_gen;
1048 	/* deactivate a bnobt cursor outside of locality range */
1049 	if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
1050 		deactivate = isbnobt;
1051 		goto out;
1052 	}
1053 	if (lena < args->minlen)
1054 		goto out;
1055 
1056 	args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
1057 	xfs_alloc_fix_len(args);
1058 	ASSERT(args->len >= args->minlen);
1059 	if (args->len < acur->len)
1060 		goto out;
1061 
1062 	/*
1063 	 * We have an aligned record that satisfies minlen and beats or matches
1064 	 * the candidate extent size. Compare locality for near allocation mode.
1065 	 */
1066 	diff = xfs_alloc_compute_diff(args->agbno, args->len,
1067 				      args->alignment, args->datatype,
1068 				      bnoa, lena, &bnew);
1069 	if (bnew == NULLAGBLOCK)
1070 		goto out;
1071 
1072 	/*
1073 	 * Deactivate a bnobt cursor with worse locality than the current best.
1074 	 */
1075 	if (diff > acur->diff) {
1076 		deactivate = isbnobt;
1077 		goto out;
1078 	}
1079 
1080 	ASSERT(args->len > acur->len ||
1081 	       (args->len == acur->len && diff <= acur->diff));
1082 	acur->rec_bno = bno;
1083 	acur->rec_len = len;
1084 	acur->bno = bnew;
1085 	acur->len = args->len;
1086 	acur->diff = diff;
1087 	*new = 1;
1088 
1089 	/*
1090 	 * We're done if we found a perfect allocation. This only deactivates
1091 	 * the current cursor, but this is just an optimization to terminate a
1092 	 * cntbt search that otherwise runs to the edge of the tree.
1093 	 */
1094 	if (acur->diff == 0 && acur->len == args->maxlen)
1095 		deactivate = true;
1096 out:
1097 	if (deactivate)
1098 		cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
1099 	trace_xfs_alloc_cur_check(cur, bno, len, diff, *new);
1100 	return 0;
1101 }
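/*
 * Illustration of the candidate tracking above (hypothetical values): if
 * the current best is len = 8 with diff = 4 and a new extent also aligns
 * to len = 8 but with diff = 2, the new extent wins on locality.  A new
 * extent that only aligns to len = 6 loses outright, because a shorter
 * usable length never beats a longer one here.
 */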
1102 
1103 /*
1104  * Complete an allocation of a candidate extent. Remove the extent from both
1105  * trees and update the args structure.
1106  */
1107 STATIC int
1108 xfs_alloc_cur_finish(
1109 	struct xfs_alloc_arg	*args,
1110 	struct xfs_alloc_cur	*acur)
1111 {
1112 	int			error;
1113 
1114 	ASSERT(acur->cnt && acur->bnolt);
1115 	ASSERT(acur->bno >= acur->rec_bno);
1116 	ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
1117 	ASSERT(xfs_verify_agbext(args->pag, acur->rec_bno, acur->rec_len));
1118 
1119 	error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
1120 				      acur->rec_len, acur->bno, acur->len, 0);
1121 	if (error)
1122 		return error;
1123 
1124 	args->agbno = acur->bno;
1125 	args->len = acur->len;
1126 	args->wasfromfl = 0;
1127 
1128 	trace_xfs_alloc_cur(args);
1129 	return 0;
1130 }
1131 
1132 /*
1133  * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
1134  * bno optimized lookup to search for extents with ideal size and locality.
1135  */
1136 STATIC int
1137 xfs_alloc_cntbt_iter(
1138 	struct xfs_alloc_arg		*args,
1139 	struct xfs_alloc_cur		*acur)
1140 {
1141 	struct xfs_btree_cur	*cur = acur->cnt;
1142 	xfs_agblock_t		bno;
1143 	xfs_extlen_t		len, cur_len;
1144 	int			error;
1145 	int			i;
1146 
1147 	if (!xfs_alloc_cur_active(cur))
1148 		return 0;
1149 
1150 	/* locality optimized lookup */
1151 	cur_len = acur->cur_len;
1152 	error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
1153 	if (error)
1154 		return error;
1155 	if (i == 0)
1156 		return 0;
1157 	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1158 	if (error)
1159 		return error;
1160 
1161 	/* check the current record and update search length from it */
1162 	error = xfs_alloc_cur_check(args, acur, cur, &i);
1163 	if (error)
1164 		return error;
1165 	ASSERT(len >= acur->cur_len);
1166 	acur->cur_len = len;
1167 
1168 	/*
1169 	 * We looked up the first record >= [agbno, len] above. The agbno is a
1170 	 * secondary key and so the current record may lie just before or after
1171 	 * agbno. If it is past agbno, check the previous record too so long as
1172 	 * the length matches as it may be closer. Don't check a smaller record
1173 	 * because that could deactivate our cursor.
1174 	 */
1175 	if (bno > args->agbno) {
1176 		error = xfs_btree_decrement(cur, 0, &i);
1177 		if (!error && i) {
1178 			error = xfs_alloc_get_rec(cur, &bno, &len, &i);
1179 			if (!error && i && len == acur->cur_len)
1180 				error = xfs_alloc_cur_check(args, acur, cur,
1181 							    &i);
1182 		}
1183 		if (error)
1184 			return error;
1185 	}
1186 
1187 	/*
1188 	 * Increment the search key by one if we have yet to find an allocation
1189 	 * candidate, or if the extent we just found already meets the doubled
1190 	 * key. Otherwise, double the search key to optimize the search.
1191 	 * Efficiency is more important here than absolute best locality.
1192 	 */
1193 	cur_len <<= 1;
1194 	if (!acur->len || acur->cur_len >= cur_len)
1195 		acur->cur_len++;
1196 	else
1197 		acur->cur_len = cur_len;
1198 
1199 	return error;
1200 }
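/*
 * Example of the search key growth above (illustrative only): starting
 * with cur_len = 16, suppose the lookup lands on a record of length 20.
 * acur->cur_len becomes 20; since a candidate was recorded and 20 is
 * smaller than the doubled key (32), the next lookup uses 32.  The key
 * thus grows geometrically, trading a little locality for far fewer
 * cntbt probes on fragmented filesystems.
 */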
1201 
1202 /*
1203  * Deal with the case where only small freespaces remain. Either return the
1204  * contents of the last freespace record, or allocate space from the freelist if
1205  * there is nothing in the tree.
1206  */
1207 STATIC int			/* error */
1208 xfs_alloc_ag_vextent_small(
1209 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
1210 	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
1211 	xfs_agblock_t		*fbnop,	/* result block number */
1212 	xfs_extlen_t		*flenp,	/* result length */
1213 	int			*stat)	/* status: 0-freelist, 1-normal/none */
1214 {
1215 	struct xfs_agf		*agf = args->agbp->b_addr;
1216 	int			error = 0;
1217 	xfs_agblock_t		fbno = NULLAGBLOCK;
1218 	xfs_extlen_t		flen = 0;
1219 	int			i = 0;
1220 
1221 	/*
1222 	 * If a cntbt cursor is provided, try to allocate the largest record in
1223 	 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
1224 	 * allocation. Make sure to respect minleft even when pulling from the
1225 	 * freelist.
1226 	 */
1227 	if (ccur)
1228 		error = xfs_btree_decrement(ccur, 0, &i);
1229 	if (error)
1230 		goto error;
1231 	if (i) {
1232 		error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
1233 		if (error)
1234 			goto error;
1235 		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1236 			xfs_btree_mark_sick(ccur);
1237 			error = -EFSCORRUPTED;
1238 			goto error;
1239 		}
1240 		goto out;
1241 	}
1242 
1243 	if (args->minlen != 1 || args->alignment != 1 ||
1244 	    args->resv == XFS_AG_RESV_AGFL ||
1245 	    be32_to_cpu(agf->agf_flcount) <= args->minleft)
1246 		goto out;
1247 
1248 	error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
1249 			&fbno, 0);
1250 	if (error)
1251 		goto error;
1252 	if (fbno == NULLAGBLOCK)
1253 		goto out;
1254 
1255 	xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
1256 			      (args->datatype & XFS_ALLOC_NOBUSY));
1257 
1258 	if (args->datatype & XFS_ALLOC_USERDATA) {
1259 		struct xfs_buf	*bp;
1260 
1261 		error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
1262 				XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
1263 				args->mp->m_bsize, 0, &bp);
1264 		if (error)
1265 			goto error;
1266 		xfs_trans_binval(args->tp, bp);
1267 	}
1268 	*fbnop = args->agbno = fbno;
1269 	*flenp = args->len = 1;
1270 	if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
1271 		xfs_btree_mark_sick(ccur);
1272 		error = -EFSCORRUPTED;
1273 		goto error;
1274 	}
1275 	args->wasfromfl = 1;
1276 	trace_xfs_alloc_small_freelist(args);
1277 
1278 	/*
1279 	 * If we're feeding an AGFL block to something that doesn't live in the
1280 	 * free space, we need to clear out the OWN_AG rmap.
1281 	 */
1282 	error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
1283 			      &XFS_RMAP_OINFO_AG);
1284 	if (error)
1285 		goto error;
1286 
1287 	*stat = 0;
1288 	return 0;
1289 
1290 out:
1291 	/*
1292 	 * Can't do the allocation, give up.
1293 	 */
1294 	if (flen < args->minlen) {
1295 		args->agbno = NULLAGBLOCK;
1296 		trace_xfs_alloc_small_notenough(args);
1297 		flen = 0;
1298 	}
1299 	*fbnop = fbno;
1300 	*flenp = flen;
1301 	*stat = 1;
1302 	trace_xfs_alloc_small_done(args);
1303 	return 0;
1304 
1305 error:
1306 	trace_xfs_alloc_small_error(args);
1307 	return error;
1308 }
1309 
1310 /*
1311  * Allocate a variable extent at exactly agno/bno.
1312  * Extent's length (returned in *len) will be between minlen and maxlen,
1313  * and of the form k * prod + mod unless there's nothing that large.
1314  * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
1315  */
1316 STATIC int			/* error */
1317 xfs_alloc_ag_vextent_exact(
1318 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
1319 {
1320 	struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
1321 	struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
1322 	int		error;
1323 	xfs_agblock_t	fbno;	/* start block of found extent */
1324 	xfs_extlen_t	flen;	/* length of found extent */
1325 	xfs_agblock_t	tbno;	/* start block of busy extent */
1326 	xfs_extlen_t	tlen;	/* length of busy extent */
1327 	xfs_agblock_t	tend;	/* end block of busy extent */
1328 	int		i;	/* success/failure of operation */
1329 	unsigned	busy_gen;
1330 
1331 	ASSERT(args->alignment == 1);
1332 
1333 	/*
1334 	 * Allocate/initialize a cursor for the by-number freespace btree.
1335 	 */
1336 	bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
1337 					  args->pag);
1338 
1339 	/*
1340 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
1341 	 * Look for the closest free block <= bno, it must contain bno
1342 	 * if any free block does.
1343 	 */
1344 	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1345 	if (error)
1346 		goto error0;
1347 	if (!i)
1348 		goto not_found;
1349 
1350 	/*
1351 	 * Grab the freespace record.
1352 	 */
1353 	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
1354 	if (error)
1355 		goto error0;
1356 	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1357 		xfs_btree_mark_sick(bno_cur);
1358 		error = -EFSCORRUPTED;
1359 		goto error0;
1360 	}
1361 	ASSERT(fbno <= args->agbno);
1362 
1363 	/*
1364 	 * Check for overlapping busy extents.
1365 	 */
1366 	tbno = fbno;
1367 	tlen = flen;
1368 	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
1369 
1370 	/*
1371 	 * Give up if the start of the extent is busy, or the freespace isn't
1372 	 * long enough for the minimum request.
1373 	 */
1374 	if (tbno > args->agbno)
1375 		goto not_found;
1376 	if (tlen < args->minlen)
1377 		goto not_found;
1378 	tend = tbno + tlen;
1379 	if (tend < args->agbno + args->minlen)
1380 		goto not_found;
1381 
1382 	/*
1383 	 * End of extent will be smaller of the freespace end and the
1384 	 * maximal requested end.
1385 	 *
1386 	 * Fix the length according to mod and prod if given.
1387 	 */
1388 	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1389 						- args->agbno;
1390 	xfs_alloc_fix_len(args);
1391 	ASSERT(args->agbno + args->len <= tend);
1392 
1393 	/*
1394 	 * We are allocating agbno for args->len
1395 	 * Allocate/initialize a cursor for the by-size btree.
1396 	 */
1397 	cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
1398 					args->pag);
1399 	ASSERT(xfs_verify_agbext(args->pag, args->agbno, args->len));
1400 	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1401 				      args->len, XFSA_FIXUP_BNO_OK);
1402 	if (error) {
1403 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1404 		goto error0;
1405 	}
1406 
1407 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1408 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1409 
1410 	args->wasfromfl = 0;
1411 	trace_xfs_alloc_exact_done(args);
1412 	return 0;
1413 
1414 not_found:
1415 	/* Didn't find it, return null. */
1416 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1417 	args->agbno = NULLAGBLOCK;
1418 	trace_xfs_alloc_exact_notfound(args);
1419 	return 0;
1420 
1421 error0:
1422 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1423 	trace_xfs_alloc_exact_error(args);
1424 	return error;
1425 }
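/*
 * Worked example of the exact allocation above (illustrative only): for
 * args->agbno = 100, minlen = 5, maxlen = 10, a lookup that finds free
 * extent [95, 20] gives tend = 115, so args->len = min(115, 110) - 100 =
 * 10.  Had busy trimming pushed the start past block 100, or the extent
 * ended before block 105, the allocation would return NULLAGBLOCK instead.
 */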
1426 
1427 /*
1428  * Search a given number of btree records in a given direction. Check each
1429  * record against the good extent we've already found.
1430  */
1431 STATIC int
1432 xfs_alloc_walk_iter(
1433 	struct xfs_alloc_arg	*args,
1434 	struct xfs_alloc_cur	*acur,
1435 	struct xfs_btree_cur	*cur,
1436 	bool			increment,
1437 	bool			find_one, /* quit on first candidate */
1438 	int			count,    /* rec count (-1 for infinite) */
1439 	int			*stat)
1440 {
1441 	int			error;
1442 	int			i;
1443 
1444 	*stat = 0;
1445 
1446 	/*
1447 	 * Search so long as the cursor is active or we find a better extent.
1448 	 * The cursor is deactivated if it extends beyond the range of the
1449 	 * current allocation candidate.
1450 	 */
1451 	while (xfs_alloc_cur_active(cur) && count) {
1452 		error = xfs_alloc_cur_check(args, acur, cur, &i);
1453 		if (error)
1454 			return error;
1455 		if (i == 1) {
1456 			*stat = 1;
1457 			if (find_one)
1458 				break;
1459 		}
1460 		if (!xfs_alloc_cur_active(cur))
1461 			break;
1462 
1463 		if (increment)
1464 			error = xfs_btree_increment(cur, 0, &i);
1465 		else
1466 			error = xfs_btree_decrement(cur, 0, &i);
1467 		if (error)
1468 			return error;
1469 		if (i == 0)
1470 			cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
1471 
1472 		if (count > 0)
1473 			count--;
1474 	}
1475 
1476 	return 0;
1477 }
1478 
1479 /*
1480  * Search the by-bno and by-size btrees in parallel in search of an extent with
1481  * ideal locality based on the NEAR mode ->agbno locality hint.
1482  */
1483 STATIC int
1484 xfs_alloc_ag_vextent_locality(
1485 	struct xfs_alloc_arg	*args,
1486 	struct xfs_alloc_cur	*acur,
1487 	int			*stat)
1488 {
1489 	struct xfs_btree_cur	*fbcur = NULL;
1490 	int			error;
1491 	int			i;
1492 	bool			fbinc;
1493 
1494 	ASSERT(acur->len == 0);
1495 
1496 	*stat = 0;
1497 
1498 	error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1499 	if (error)
1500 		return error;
1501 	error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1502 	if (error)
1503 		return error;
1504 	error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1505 	if (error)
1506 		return error;
1507 
1508 	/*
1509 	 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1510 	 * right and lookup the closest extent to the locality hint for each
1511 	 * extent size key in the cntbt. The entire search terminates
1512 	 * immediately on a bnobt hit because that means we've found best case
1513 	 * locality. Otherwise the search continues until the cntbt cursor runs
1514 	 * off the end of the tree. If no allocation candidate is found at this
1515 	 * point, give up on locality, walk backwards from the end of the cntbt
1516 	 * and take the first available extent.
1517 	 *
1518 	 * The parallel tree searches balance each other out to provide fairly
1519 	 * consistent performance for various situations. The bnobt search can
1520 	 * have pathological behavior in the worst case scenario of larger
1521 	 * allocation requests and fragmented free space. On the other hand, the
1522 	 * bnobt is able to satisfy most smaller allocation requests much more
1523 	 * quickly than the cntbt. The cntbt search can sift through fragmented
1524 	 * free space and sets of free extents for larger allocation requests
1525 	 * more quickly than the bnobt. Since the locality hint is just a hint
1526 	 * and we don't want to scan the entire bnobt for perfect locality, the
1527 	 * cntbt search essentially bounds the bnobt search such that we can
1528 	 * find good enough locality at reasonable performance in most cases.
1529 	 */
1530 	while (xfs_alloc_cur_active(acur->bnolt) ||
1531 	       xfs_alloc_cur_active(acur->bnogt) ||
1532 	       xfs_alloc_cur_active(acur->cnt)) {
1533 
1534 		trace_xfs_alloc_cur_lookup(args);
1535 
1536 		/*
1537 		 * Search the bnobt left and right. In the case of a hit, finish
1538 		 * the search in the opposite direction and we're done.
1539 		 */
1540 		error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1541 					    true, 1, &i);
1542 		if (error)
1543 			return error;
1544 		if (i == 1) {
1545 			trace_xfs_alloc_cur_left(args);
1546 			fbcur = acur->bnogt;
1547 			fbinc = true;
1548 			break;
1549 		}
1550 		error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1551 					    1, &i);
1552 		if (error)
1553 			return error;
1554 		if (i == 1) {
1555 			trace_xfs_alloc_cur_right(args);
1556 			fbcur = acur->bnolt;
1557 			fbinc = false;
1558 			break;
1559 		}
1560 
1561 		/*
1562 		 * Check the extent with best locality based on the current
1563 		 * extent size search key and keep track of the best candidate.
1564 		 */
1565 		error = xfs_alloc_cntbt_iter(args, acur);
1566 		if (error)
1567 			return error;
1568 		if (!xfs_alloc_cur_active(acur->cnt)) {
1569 			trace_xfs_alloc_cur_lookup_done(args);
1570 			break;
1571 		}
1572 	}
1573 
1574 	/*
1575 	 * If we failed to find anything due to busy extents, return empty
1576 	 * handed so the caller can flush and retry. If no busy extents were
1577 	 * found, walk backwards from the end of the cntbt as a last resort.
1578 	 */
1579 	if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1580 		error = xfs_btree_decrement(acur->cnt, 0, &i);
1581 		if (error)
1582 			return error;
1583 		if (i) {
1584 			acur->cnt->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
1585 			fbcur = acur->cnt;
1586 			fbinc = false;
1587 		}
1588 	}
1589 
1590 	/*
1591 	 * Search in the opposite direction for a better entry in the case of
1592 	 * a bnobt hit or walk backwards from the end of the cntbt.
1593 	 */
1594 	if (fbcur) {
1595 		error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1596 					    &i);
1597 		if (error)
1598 			return error;
1599 	}
1600 
1601 	if (acur->len)
1602 		*stat = 1;
1603 
1604 	return 0;
1605 }
1606 
1607 /* Check the last block of the cnt btree for allocations. */
1608 static int
1609 xfs_alloc_ag_vextent_lastblock(
1610 	struct xfs_alloc_arg	*args,
1611 	struct xfs_alloc_cur	*acur,
1612 	xfs_agblock_t		*bno,
1613 	xfs_extlen_t		*len,
1614 	bool			*allocated)
1615 {
1616 	int			error;
1617 	int			i;
1618 
1619 #ifdef DEBUG
1620 	/* Randomly don't execute the first algorithm. */
1621 	if (get_random_u32_below(2))
1622 		return 0;
1623 #endif
1624 
1625 	/*
1626 	 * Start from the entry that lookup found, sequence through all larger
1627 	 * free blocks.  If we're actually pointing at a record smaller than
1628 	 * maxlen, go to the start of this block, and skip all those smaller
1629 	 * than minlen.
1630 	 */
1631 	if (*len || args->alignment > 1) {
1632 		acur->cnt->bc_levels[0].ptr = 1;
1633 		do {
1634 			error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1635 			if (error)
1636 				return error;
1637 			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1638 				xfs_btree_mark_sick(acur->cnt);
1639 				return -EFSCORRUPTED;
1640 			}
1641 			if (*len >= args->minlen)
1642 				break;
1643 			error = xfs_btree_increment(acur->cnt, 0, &i);
1644 			if (error)
1645 				return error;
1646 		} while (i);
1647 		ASSERT(*len >= args->minlen);
1648 		if (!i)
1649 			return 0;
1650 	}
1651 
1652 	error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1653 	if (error)
1654 		return error;
1655 
1656 	/*
1657 	 * It didn't work.  We COULD be in a case where there's a good record
1658 	 * somewhere, so try again.
1659 	 */
1660 	if (acur->len == 0)
1661 		return 0;
1662 
1663 	trace_xfs_alloc_near_first(args);
1664 	*allocated = true;
1665 	return 0;
1666 }
1667 
1668 /*
1669  * Allocate a variable extent near bno in the allocation group agno.
1670  * Extent's length (returned in len) will be between minlen and maxlen,
1671  * and of the form k * prod + mod unless there's nothing that large.
1672  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1673  */
1674 STATIC int
1675 xfs_alloc_ag_vextent_near(
1676 	struct xfs_alloc_arg	*args,
1677 	uint32_t		alloc_flags)
1678 {
1679 	struct xfs_alloc_cur	acur = {};
1680 	int			error;		/* error code */
1681 	int			i;		/* result code, temporary */
1682 	xfs_agblock_t		bno;
1683 	xfs_extlen_t		len;
1684 
1685 	/* handle uninitialized agbno range so caller doesn't have to */
1686 	if (!args->min_agbno && !args->max_agbno)
1687 		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1688 	ASSERT(args->min_agbno <= args->max_agbno);
1689 
1690 	/* clamp agbno to the range if it's outside */
1691 	if (args->agbno < args->min_agbno)
1692 		args->agbno = args->min_agbno;
1693 	if (args->agbno > args->max_agbno)
1694 		args->agbno = args->max_agbno;
1695 
1696 	/* Retry once quickly if we find busy extents before blocking. */
1697 	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1698 restart:
1699 	len = 0;
1700 
1701 	/*
1702 	 * Set up cursors and see if there are any free extents as big as
1703 	 * maxlen. If not, pick the last entry in the tree unless the tree is
1704 	 * empty.
1705 	 */
1706 	error = xfs_alloc_cur_setup(args, &acur);
1707 	if (error == -ENOSPC) {
1708 		error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1709 				&len, &i);
1710 		if (error)
1711 			goto out;
1712 		if (i == 0 || len == 0) {
1713 			trace_xfs_alloc_near_noentry(args);
1714 			goto out;
1715 		}
1716 		ASSERT(i == 1);
1717 	} else if (error) {
1718 		goto out;
1719 	}
1720 
1721 	/*
1722 	 * First algorithm.
1723 	 * If the requested extent is large wrt the freespaces available
1724 	 * in this a.g., then the cursor will be pointing to a btree entry
1725 	 * near the right edge of the tree.  If it's in the last btree leaf
1726 	 * block, then we just examine all the entries in that block
1727 	 * that are big enough, and pick the best one.
1728 	 */
1729 	if (xfs_btree_islastblock(acur.cnt, 0)) {
1730 		bool		allocated = false;
1731 
1732 		error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1733 				&allocated);
1734 		if (error)
1735 			goto out;
1736 		if (allocated)
1737 			goto alloc_finish;
1738 	}
1739 
1740 	/*
1741 	 * Second algorithm. Combined cntbt and bnobt search to find ideal
1742 	 * locality.
1743 	 */
1744 	error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1745 	if (error)
1746 		goto out;
1747 
1748 	/*
1749 	 * If we couldn't get anything, give up.
1750 	 */
1751 	if (!acur.len) {
1752 		if (acur.busy) {
1753 			/*
1754 			 * Our only valid extents must have been busy. Flush and
1755 			 * retry the allocation again. If we get an -EAGAIN
1756 			 * error, we're being told that a deadlock was avoided
1757 			 * and the current transaction needs committing before
1758 			 * the allocation can be retried.
1759 			 */
1760 			trace_xfs_alloc_near_busy(args);
1761 			error = xfs_extent_busy_flush(args->tp, args->pag,
1762 					acur.busy_gen, alloc_flags);
1763 			if (error)
1764 				goto out;
1765 
1766 			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1767 			goto restart;
1768 		}
1769 		trace_xfs_alloc_size_neither(args);
1770 		args->agbno = NULLAGBLOCK;
1771 		goto out;
1772 	}
1773 
1774 alloc_finish:
1775 	/* fix up btrees on a successful allocation */
1776 	error = xfs_alloc_cur_finish(args, &acur);
1777 
1778 out:
1779 	xfs_alloc_cur_close(&acur, error);
1780 	return error;
1781 }
1782 
1783 /*
1784  * Allocate a variable extent anywhere in the allocation group agno.
1785  * Extent's length (returned in len) will be between minlen and maxlen,
1786  * and of the form k * prod + mod unless there's nothing that large.
1787  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1788  */
1789 static int
1790 xfs_alloc_ag_vextent_size(
1791 	struct xfs_alloc_arg	*args,
1792 	uint32_t		alloc_flags)
1793 {
1794 	struct xfs_agf		*agf = args->agbp->b_addr;
1795 	struct xfs_btree_cur	*bno_cur;
1796 	struct xfs_btree_cur	*cnt_cur;
1797 	xfs_agblock_t		fbno;		/* start of found freespace */
1798 	xfs_extlen_t		flen;		/* length of found freespace */
1799 	xfs_agblock_t		rbno;		/* returned block number */
1800 	xfs_extlen_t		rlen;		/* length of returned extent */
1801 	bool			busy;
1802 	unsigned		busy_gen;
1803 	int			error;
1804 	int			i;
1805 
1806 	/* Retry once quickly if we find busy extents before blocking. */
1807 	alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1808 restart:
1809 	/*
1810 	 * Allocate and initialize a cursor for the by-size btree.
1811 	 */
1812 	cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
1813 					args->pag);
1814 	bno_cur = NULL;
1815 
1816 	/*
1817 	 * Look for an entry >= maxlen+alignment-1 blocks.
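	 *
	 * (An extent that long is guaranteed to contain maxlen blocks at the
	 * required alignment no matter where it starts; e.g. maxlen = 8 with
	 * alignment = 4 needs an 11-block extent in the worst case.)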
1818 	 */
1819 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1820 			args->maxlen + args->alignment - 1, &i)))
1821 		goto error0;
1822 
1823 	/*
1824 	 * If none then we have to settle for a smaller extent. In the case that
1825 	 * there are no large extents, this will return the last entry in the
1826 	 * tree unless the tree is empty. In the case that there are only busy
1827 	 * large extents, this will return the largest small extent unless there
1828 	 * are no smaller extents available.
1829 	 */
1830 	if (!i) {
1831 		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1832 						   &fbno, &flen, &i);
1833 		if (error)
1834 			goto error0;
1835 		if (i == 0 || flen == 0) {
1836 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1837 			trace_xfs_alloc_size_noentry(args);
1838 			return 0;
1839 		}
1840 		ASSERT(i == 1);
1841 		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1842 				&rlen, &busy_gen);
1843 	} else {
1844 		/*
1845 		 * Search for a non-busy extent that is large enough.
1846 		 */
1847 		for (;;) {
1848 			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1849 			if (error)
1850 				goto error0;
1851 			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1852 				xfs_btree_mark_sick(cnt_cur);
1853 				error = -EFSCORRUPTED;
1854 				goto error0;
1855 			}
1856 
1857 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1858 					&rbno, &rlen, &busy_gen);
1859 
1860 			if (rlen >= args->maxlen)
1861 				break;
1862 
1863 			error = xfs_btree_increment(cnt_cur, 0, &i);
1864 			if (error)
1865 				goto error0;
1866 			if (i)
1867 				continue;
1868 
1869 			/*
1870 			 * Our only valid extents must have been busy. Flush and
1871 			 * retry the allocation again. If we get an -EAGAIN
1872 			 * error, we're being told that a deadlock was avoided
1873 			 * and the current transaction needs committing before
1874 			 * the allocation can be retried.
1875 			 */
1876 			trace_xfs_alloc_size_busy(args);
1877 			error = xfs_extent_busy_flush(args->tp, args->pag,
1878 					busy_gen, alloc_flags);
1879 			if (error)
1880 				goto error0;
1881 
1882 			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1883 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1884 			goto restart;
1885 		}
1886 	}
1887 
1888 	/*
1889 	 * In the first case above, we got the last entry in the
1890 	 * by-size btree.  Now we check to see if the space hits maxlen
1891 	 * once aligned; if not, we search left for something better.
1892 	 * This can't happen in the second case above.
1893 	 */
1894 	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1895 	if (XFS_IS_CORRUPT(args->mp,
1896 			   rlen != 0 &&
1897 			   (rlen > flen ||
1898 			    rbno + rlen > fbno + flen))) {
1899 		xfs_btree_mark_sick(cnt_cur);
1900 		error = -EFSCORRUPTED;
1901 		goto error0;
1902 	}
1903 	if (rlen < args->maxlen) {
1904 		xfs_agblock_t	bestfbno;
1905 		xfs_extlen_t	bestflen;
1906 		xfs_agblock_t	bestrbno;
1907 		xfs_extlen_t	bestrlen;
1908 
1909 		bestrlen = rlen;
1910 		bestrbno = rbno;
1911 		bestflen = flen;
1912 		bestfbno = fbno;
1913 		for (;;) {
1914 			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1915 				goto error0;
1916 			if (i == 0)
1917 				break;
1918 			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1919 					&i)))
1920 				goto error0;
1921 			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1922 				xfs_btree_mark_sick(cnt_cur);
1923 				error = -EFSCORRUPTED;
1924 				goto error0;
1925 			}
1926 			if (flen < bestrlen)
1927 				break;
1928 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1929 					&rbno, &rlen, &busy_gen);
1930 			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1931 			if (XFS_IS_CORRUPT(args->mp,
1932 					   rlen != 0 &&
1933 					   (rlen > flen ||
1934 					    rbno + rlen > fbno + flen))) {
1935 				xfs_btree_mark_sick(cnt_cur);
1936 				error = -EFSCORRUPTED;
1937 				goto error0;
1938 			}
1939 			if (rlen > bestrlen) {
1940 				bestrlen = rlen;
1941 				bestrbno = rbno;
1942 				bestflen = flen;
1943 				bestfbno = fbno;
1944 				if (rlen == args->maxlen)
1945 					break;
1946 			}
1947 		}
1948 		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1949 				&i)))
1950 			goto error0;
1951 		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1952 			xfs_btree_mark_sick(cnt_cur);
1953 			error = -EFSCORRUPTED;
1954 			goto error0;
1955 		}
1956 		rlen = bestrlen;
1957 		rbno = bestrbno;
1958 		flen = bestflen;
1959 		fbno = bestfbno;
1960 	}
1961 	args->wasfromfl = 0;
1962 	/*
1963 	 * Fix up the length.
1964 	 */
1965 	args->len = rlen;
1966 	if (rlen < args->minlen) {
1967 		if (busy) {
1968 			/*
1969 			 * Our only valid extents must have been busy. Flush and
1970 			 * retry the allocation again. If we get an -EAGAIN
1971 			 * error, we're being told that a deadlock was avoided
1972 			 * and the current transaction needs committing before
1973 			 * the allocation can be retried.
1974 			 */
1975 			trace_xfs_alloc_size_busy(args);
1976 			error = xfs_extent_busy_flush(args->tp, args->pag,
1977 					busy_gen, alloc_flags);
1978 			if (error)
1979 				goto error0;
1980 
1981 			alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1982 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1983 			goto restart;
1984 		}
1985 		goto out_nominleft;
1986 	}
1987 	xfs_alloc_fix_len(args);
1988 
1989 	rlen = args->len;
1990 	if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1991 		xfs_btree_mark_sick(cnt_cur);
1992 		error = -EFSCORRUPTED;
1993 		goto error0;
1994 	}
1995 	/*
1996 	 * Allocate and initialize a cursor for the by-block tree.
1997 	 */
1998 	bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
1999 					args->pag);
2000 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
2001 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
2002 		goto error0;
2003 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2004 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2005 	cnt_cur = bno_cur = NULL;
2006 	args->len = rlen;
2007 	args->agbno = rbno;
2008 	if (XFS_IS_CORRUPT(args->mp,
2009 			   args->agbno + args->len >
2010 			   be32_to_cpu(agf->agf_length))) {
2011 		xfs_ag_mark_sick(args->pag, XFS_SICK_AG_BNOBT);
2012 		error = -EFSCORRUPTED;
2013 		goto error0;
2014 	}
2015 	trace_xfs_alloc_size_done(args);
2016 	return 0;
2017 
2018 error0:
2019 	trace_xfs_alloc_size_error(args);
2020 	if (cnt_cur)
2021 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2022 	if (bno_cur)
2023 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2024 	return error;
2025 
2026 out_nominleft:
2027 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2028 	trace_xfs_alloc_size_nominleft(args);
2029 	args->agbno = NULLAGBLOCK;
2030 	return 0;
2031 }
2032 
2033 /*
2034  * Free the extent starting at agno/bno for length.
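 *
 * The freed range is merged with any free extents it abuts.  As an
 * illustrative example: freeing [100, 10] with existing free extents
 * [90, 10] and [110, 5] on either side collapses all three records into a
 * single [90, 25] extent in both the by-block and by-size btrees.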
2035  */
2036 int
2037 xfs_free_ag_extent(
2038 	struct xfs_trans		*tp,
2039 	struct xfs_buf			*agbp,
2040 	xfs_agnumber_t			agno,
2041 	xfs_agblock_t			bno,
2042 	xfs_extlen_t			len,
2043 	const struct xfs_owner_info	*oinfo,
2044 	enum xfs_ag_resv_type		type)
2045 {
2046 	struct xfs_mount		*mp;
2047 	struct xfs_btree_cur		*bno_cur;
2048 	struct xfs_btree_cur		*cnt_cur;
2049 	xfs_agblock_t			gtbno; /* start of right neighbor */
2050 	xfs_extlen_t			gtlen; /* length of right neighbor */
2051 	xfs_agblock_t			ltbno; /* start of left neighbor */
2052 	xfs_extlen_t			ltlen; /* length of left neighbor */
2053 	xfs_agblock_t			nbno; /* new starting block of freesp */
2054 	xfs_extlen_t			nlen; /* new length of freespace */
2055 	int				haveleft; /* have a left neighbor */
2056 	int				haveright; /* have a right neighbor */
2057 	int				i;
2058 	int				error;
2059 	struct xfs_perag		*pag = agbp->b_pag;
2060 	bool				fixup_longest = false;
2061 
2062 	bno_cur = cnt_cur = NULL;
2063 	mp = tp->t_mountp;
2064 
2065 	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
2066 		error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
2067 		if (error)
2068 			goto error0;
2069 	}
2070 
2071 	/*
2072 	 * Allocate and initialize a cursor for the by-block btree.
2073 	 */
2074 	bno_cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
2075 	/*
2076 	 * Look for a neighboring block on the left (lower block numbers)
2077 	 * that is contiguous with this space.
2078 	 */
2079 	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
2080 		goto error0;
2081 	if (haveleft) {
2082 		/*
2083 		 * There is a block to our left.
2084 		 */
2085 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
2086 			goto error0;
2087 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2088 			xfs_btree_mark_sick(bno_cur);
2089 			error = -EFSCORRUPTED;
2090 			goto error0;
2091 		}
2092 		/*
2093 		 * It's not contiguous, though.
2094 		 */
2095 		if (ltbno + ltlen < bno)
2096 			haveleft = 0;
2097 		else {
2098 			/*
2099 			 * If this failure happens, the request to free this
2100 			 * space was invalid: it's (partly) already free.
2101 			 * Very bad.
2102 			 */
2103 			if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
2104 				xfs_btree_mark_sick(bno_cur);
2105 				error = -EFSCORRUPTED;
2106 				goto error0;
2107 			}
2108 		}
2109 	}
2110 	/*
2111 	 * Look for a neighboring block on the right (higher block numbers)
2112 	 * that is contiguous with this space.
2113 	 */
2114 	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
2115 		goto error0;
2116 	if (haveright) {
2117 		/*
2118 		 * There is a block to our right.
2119 		 */
2120 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
2121 			goto error0;
2122 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2123 			xfs_btree_mark_sick(bno_cur);
2124 			error = -EFSCORRUPTED;
2125 			goto error0;
2126 		}
2127 		/*
2128 		 * It's not contiguous, though.
2129 		 */
2130 		if (bno + len < gtbno)
2131 			haveright = 0;
2132 		else {
2133 			/*
2134 			 * If this failure happens, the request to free this
2135 			 * space was invalid: it's (partly) already free.
2136 			 * Very bad.
2137 			 */
2138 			if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
2139 				xfs_btree_mark_sick(bno_cur);
2140 				error = -EFSCORRUPTED;
2141 				goto error0;
2142 			}
2143 		}
2144 	}
2145 	/*
2146 	 * Now allocate and initialize a cursor for the by-size tree.
2147 	 */
2148 	cnt_cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
2149 	/*
2150 	 * Have both left and right contiguous neighbors.
2151 	 * Merge all three into a single free block.
2152 	 */
2153 	if (haveleft && haveright) {
2154 		/*
2155 		 * Delete the old by-size entry on the left.
2156 		 */
2157 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2158 			goto error0;
2159 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2160 			xfs_btree_mark_sick(cnt_cur);
2161 			error = -EFSCORRUPTED;
2162 			goto error0;
2163 		}
2164 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2165 			goto error0;
2166 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2167 			xfs_btree_mark_sick(cnt_cur);
2168 			error = -EFSCORRUPTED;
2169 			goto error0;
2170 		}
2171 		/*
2172 		 * Delete the old by-size entry on the right.
2173 		 */
2174 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2175 			goto error0;
2176 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2177 			xfs_btree_mark_sick(cnt_cur);
2178 			error = -EFSCORRUPTED;
2179 			goto error0;
2180 		}
2181 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2182 			goto error0;
2183 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2184 			xfs_btree_mark_sick(cnt_cur);
2185 			error = -EFSCORRUPTED;
2186 			goto error0;
2187 		}
2188 		/*
2189 		 * Delete the old by-block entry for the right block.
2190 		 */
2191 		if ((error = xfs_btree_delete(bno_cur, &i)))
2192 			goto error0;
2193 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2194 			xfs_btree_mark_sick(bno_cur);
2195 			error = -EFSCORRUPTED;
2196 			goto error0;
2197 		}
2198 		/*
2199 		 * Move the by-block cursor back to the left neighbor.
2200 		 */
2201 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2202 			goto error0;
2203 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2204 			xfs_btree_mark_sick(bno_cur);
2205 			error = -EFSCORRUPTED;
2206 			goto error0;
2207 		}
2208 #ifdef DEBUG
2209 		/*
2210 		 * Check that this is the right record: delete didn't
2211 		 * mangle the cursor.
2212 		 */
2213 		{
2214 			xfs_agblock_t	xxbno;
2215 			xfs_extlen_t	xxlen;
2216 
2217 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2218 					&i)))
2219 				goto error0;
2220 			if (XFS_IS_CORRUPT(mp,
2221 					   i != 1 ||
2222 					   xxbno != ltbno ||
2223 					   xxlen != ltlen)) {
2224 				xfs_btree_mark_sick(bno_cur);
2225 				error = -EFSCORRUPTED;
2226 				goto error0;
2227 			}
2228 		}
2229 #endif
2230 		/*
2231 		 * Update remaining by-block entry to the new, joined block.
2232 		 */
2233 		nbno = ltbno;
2234 		nlen = len + ltlen + gtlen;
2235 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2236 			goto error0;
2237 	}
2238 	/*
2239 	 * Have only a left contiguous neighbor.
2240 	 * Merge it together with the new freespace.
2241 	 */
2242 	else if (haveleft) {
2243 		/*
2244 		 * Delete the old by-size entry on the left.
2245 		 */
2246 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2247 			goto error0;
2248 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2249 			xfs_btree_mark_sick(cnt_cur);
2250 			error = -EFSCORRUPTED;
2251 			goto error0;
2252 		}
2253 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2254 			goto error0;
2255 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2256 			xfs_btree_mark_sick(cnt_cur);
2257 			error = -EFSCORRUPTED;
2258 			goto error0;
2259 		}
2260 		/*
2261 		 * Back up the by-block cursor to the left neighbor, and
2262 		 * update its length.
2263 		 */
2264 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2265 			goto error0;
2266 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2267 			xfs_btree_mark_sick(bno_cur);
2268 			error = -EFSCORRUPTED;
2269 			goto error0;
2270 		}
2271 		nbno = ltbno;
2272 		nlen = len + ltlen;
2273 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2274 			goto error0;
2275 	}
2276 	/*
2277 	 * Have only a right contiguous neighbor.
2278 	 * Merge it together with the new freespace.
2279 	 */
2280 	else if (haveright) {
2281 		/*
2282 		 * Delete the old by-size entry on the right.
2283 		 */
2284 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2285 			goto error0;
2286 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2287 			xfs_btree_mark_sick(cnt_cur);
2288 			error = -EFSCORRUPTED;
2289 			goto error0;
2290 		}
2291 		if ((error = xfs_btree_delete(cnt_cur, &i)))
2292 			goto error0;
2293 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2294 			xfs_btree_mark_sick(cnt_cur);
2295 			error = -EFSCORRUPTED;
2296 			goto error0;
2297 		}
2298 		/*
2299 		 * Update the starting block and length of the right
2300 		 * neighbor in the by-block tree.
2301 		 */
2302 		nbno = bno;
2303 		nlen = len + gtlen;
2304 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2305 			goto error0;
2306 	}
2307 	/*
2308 	 * No contiguous neighbors.
2309 	 * Insert the new freespace into the by-block tree.
2310 	 */
2311 	else {
2312 		nbno = bno;
2313 		nlen = len;
2314 		if ((error = xfs_btree_insert(bno_cur, &i)))
2315 			goto error0;
2316 		if (XFS_IS_CORRUPT(mp, i != 1)) {
2317 			xfs_btree_mark_sick(bno_cur);
2318 			error = -EFSCORRUPTED;
2319 			goto error0;
2320 		}
2321 	}
2322 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2323 	bno_cur = NULL;
2324 
2325 	/*
2326 	 * In all cases we need to insert the new freespace in the by-size tree.
2327 	 *
2328 	 * If this new freespace is being inserted in the block that contains
2329 	 * the largest free space in the btree, make sure we also fix up the
2330 	 * agf->agf-longest tracker field.
2331 	 * agf->agf_longest tracker field.
2332 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2333 		goto error0;
2334 	if (XFS_IS_CORRUPT(mp, i != 0)) {
2335 		xfs_btree_mark_sick(cnt_cur);
2336 		error = -EFSCORRUPTED;
2337 		goto error0;
2338 	}
2339 	if (xfs_alloc_cursor_at_lastrec(cnt_cur))
2340 		fixup_longest = true;
2341 	if ((error = xfs_btree_insert(cnt_cur, &i)))
2342 		goto error0;
2343 	if (XFS_IS_CORRUPT(mp, i != 1)) {
2344 		xfs_btree_mark_sick(cnt_cur);
2345 		error = -EFSCORRUPTED;
2346 		goto error0;
2347 	}
2348 	if (fixup_longest) {
2349 		error = xfs_alloc_fixup_longest(cnt_cur);
2350 		if (error)
2351 			goto error0;
2352 	}
2353 
2354 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2355 	cnt_cur = NULL;
2356 
2357 	/*
2358 	 * Update the freespace totals in the ag and superblock.
2359 	 */
2360 	error = xfs_alloc_update_counters(tp, agbp, len);
2361 	xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
2362 	if (error)
2363 		goto error0;
2364 
2365 	XFS_STATS_INC(mp, xs_freex);
2366 	XFS_STATS_ADD(mp, xs_freeb, len);
2367 
2368 	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2369 
2370 	return 0;
2371 
2372  error0:
2373 	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2374 	if (bno_cur)
2375 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2376 	if (cnt_cur)
2377 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2378 	return error;
2379 }
2380 
2381 /*
2382  * Visible (exported) allocation/free functions.
2383  * Some of these are used just by xfs_alloc_btree.c and this file.
2384  */
2385 
2386 /*
2387  * Compute and fill in value of m_alloc_maxlevels.
2388  */
2389 void
2390 xfs_alloc_compute_maxlevels(
2391 	xfs_mount_t	*mp)	/* file system mount structure */
2392 {
2393 	mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2394 			(mp->m_sb.sb_agblocks + 1) / 2);
2395 	ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2396 }
2397 
2398 /*
2399  * Find the length of the longest extent in an AG.  The 'need' parameter
2400  * specifies how much space we're going to need for the AGFL and the
2401  * 'reserved' parameter tells us how many blocks in this AG are reserved for
2402  * other callers.
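 *
 * Illustrative example (numbers invented): with pagf_longest = 100,
 * pagf_freeblks = 120, need = 10, pagf_flcount = 4 and reserved = 30,
 * delta = (10 - 4) + (30 - (120 - 100)) = 16, so the largest usable extent
 * is 100 - 16 = 84 blocks (further capped at m_ag_max_usable).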
2403  */
2404 xfs_extlen_t
2405 xfs_alloc_longest_free_extent(
2406 	struct xfs_perag	*pag,
2407 	xfs_extlen_t		need,
2408 	xfs_extlen_t		reserved)
2409 {
2410 	xfs_extlen_t		delta = 0;
2411 
2412 	/*
2413 	 * If the AGFL needs a recharge, we'll have to subtract that from the
2414 	 * longest extent.
2415 	 */
2416 	if (need > pag->pagf_flcount)
2417 		delta = need - pag->pagf_flcount;
2418 
2419 	/*
2420 	 * If we cannot maintain others' reservations with space from the
2421 	 * not-longest freesp extents, we'll have to subtract /that/ from
2422 	 * the longest extent too.
2423 	 */
2424 	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2425 		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2426 
2427 	/*
2428 	 * If the longest extent is long enough to satisfy all the
2429 	 * reservations and AGFL rules in place, we can return this extent.
2430 	 */
2431 	if (pag->pagf_longest > delta)
2432 		return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2433 				pag->pagf_longest - delta);
2434 
2435 	/* Otherwise, let the caller try for 1 block if there's space. */
2436 	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2437 }
2438 
2439 /*
2440  * Compute the minimum length of the AGFL in the given AG.  If @pag is NULL,
2441  * return the largest possible minimum length.
2442  */
2443 unsigned int
2444 xfs_alloc_min_freelist(
2445 	struct xfs_mount	*mp,
2446 	struct xfs_perag	*pag)
2447 {
2448 	/* AG btrees have at least 1 level. */
2449 	const unsigned int	bno_level = pag ? pag->pagf_bno_level : 1;
2450 	const unsigned int	cnt_level = pag ? pag->pagf_cnt_level : 1;
2451 	const unsigned int	rmap_level = pag ? pag->pagf_rmap_level : 1;
2452 	unsigned int		min_free;
2453 
2454 	ASSERT(mp->m_alloc_maxlevels > 0);
2455 
2456 	/*
2457 	 * For a btree shorter than the maximum height, the worst case is that
2458 	 * every level gets split and a new level is added, then while inserting
2459 	 * another entry to refill the AGFL, every level under the old root gets
2460 	 * split again. This is:
2461 	 *
2462 	 *   (full height split reservation) + (AGFL refill split height)
2463 	 * = (current height + 1) + (current height - 1)
2464 	 * = (new height) + (new height - 2)
2465 	 * = 2 * new height - 2
2466 	 *
2467 	 * For a btree of maximum height, the worst case is that every level
2468 	 * under the root gets split, then while inserting another entry to
2469 	 * refill the AGFL, every level under the root gets split again. This is
2470 	 * also:
2471 	 *
2472 	 *   2 * (current height - 1)
2473 	 * = 2 * (new height - 1)
2474 	 * = 2 * new height - 2
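	 *
	 * Worked example (illustrative numbers): with bno_level = 2 and
	 * m_alloc_maxlevels = 5, the bnobt needs min(2 + 1, 5) * 2 - 2 = 4
	 * blocks; a maximum-height tree (level 5) needs
	 * min(5 + 1, 5) * 2 - 2 = 8 blocks.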
2475 	 */
2476 
2477 	/* space needed by-bno freespace btree */
2478 	min_free = min(bno_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
2479 	/* space needed by-size freespace btree */
2480 	min_free += min(cnt_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
2481 	/* space needed reverse mapping used space btree */
2482 	if (xfs_has_rmapbt(mp))
2483 		min_free += min(rmap_level + 1, mp->m_rmap_maxlevels) * 2 - 2;
2484 	return min_free;
2485 }
2486 
2487 /*
2488  * Check if the operation we are fixing up the freelist for should go ahead or
2489  * not. If we are freeing blocks, we always allow it, otherwise the allocation
2490  * is dependent on whether the size and shape of free space available will
2491  * permit the requested allocation to take place.
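 *
 * A worked example with invented numbers: minlen = 8, alignment = 4 and
 * minalignslop = 0 give alloc_len = 11, so the longest usable extent must be
 * at least 11 blocks; with pagf_freeblks = 200, pagf_flcount = 6,
 * min_free = 10, reservation = 40 and minleft = 16, available works out to
 * 200 + min(6, 10) - 40 - 10 - 16 = 140 blocks.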
2492  */
2493 static bool
2494 xfs_alloc_space_available(
2495 	struct xfs_alloc_arg	*args,
2496 	xfs_extlen_t		min_free,
2497 	int			flags)
2498 {
2499 	struct xfs_perag	*pag = args->pag;
2500 	xfs_extlen_t		alloc_len, longest;
2501 	xfs_extlen_t		reservation; /* blocks that are still reserved */
2502 	int			available;
2503 	xfs_extlen_t		agflcount;
2504 
2505 	if (flags & XFS_ALLOC_FLAG_FREEING)
2506 		return true;
2507 
2508 	reservation = xfs_ag_resv_needed(pag, args->resv);
2509 
2510 	/* do we have enough contiguous free space for the allocation? */
2511 	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2512 	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2513 	if (longest < alloc_len)
2514 		return false;
2515 
2516 	/*
2517 	 * Do we have enough free space remaining for the allocation? Don't
2518 	 * account extra agfl blocks because we are about to defer freeing them,
2519 	 * making them unavailable until the current transaction commits.
2520 	 */
2521 	agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2522 	available = (int)(pag->pagf_freeblks + agflcount -
2523 			  reservation - min_free - args->minleft);
2524 	if (available < (int)max(args->total, alloc_len))
2525 		return false;
2526 
2527 	/*
2528 	 * Clamp maxlen to the amount of free space available for the actual
2529 	 * extent allocation.
2530 	 */
2531 	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2532 		args->maxlen = available;
2533 		ASSERT(args->maxlen > 0);
2534 		ASSERT(args->maxlen >= args->minlen);
2535 	}
2536 
2537 	return true;
2538 }
2539 
2540 /*
2541  * Check the agfl fields of the agf for inconsistency or corruption.
2542  *
2543  * The original purpose was to detect an agfl header padding mismatch between
2544  * current and early v5 kernels. This problem manifests as a 1-slot size
2545  * difference between the on-disk flcount and the active [first, last] range of
2546  * a wrapped agfl.
2547  *
2548  * However, we need to use these same checks to catch agfl count corruptions
2549  * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
2550  * way, we need to reset the agfl and warn the user.
2551  *
2552  * Return true if a reset is required before the agfl can be used, false
2553  * otherwise.
2554  */
2555 static bool
2556 xfs_agfl_needs_reset(
2557 	struct xfs_mount	*mp,
2558 	struct xfs_agf		*agf)
2559 {
2560 	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
2561 	uint32_t		l = be32_to_cpu(agf->agf_fllast);
2562 	uint32_t		c = be32_to_cpu(agf->agf_flcount);
2563 	int			agfl_size = xfs_agfl_size(mp);
2564 	int			active;
2565 
2566 	/*
2567 	 * The agf read verifier catches severe corruption of these fields.
2568 	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2569 	 * the verifier allows it.
2570 	 */
2571 	if (f >= agfl_size || l >= agfl_size)
2572 		return true;
2573 	if (c > agfl_size)
2574 		return true;
2575 
2576 	/*
2577 	 * Check consistency between the on-disk count and the active range. An
2578 	 * agfl padding mismatch manifests as an inconsistent flcount.
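	 *
	 * E.g. (illustrative numbers): for a wrapped agfl with
	 * agfl_size = 118, first = 116 and last = 2, the active range is
	 * 118 - 116 + 2 + 1 = 5 slots; the historic padding mismatch would
	 * leave flcount off by one here, triggering a reset.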
2579 	 */
2580 	if (c && l >= f)
2581 		active = l - f + 1;
2582 	else if (c)
2583 		active = agfl_size - f + l + 1;
2584 	else
2585 		active = 0;
2586 
2587 	return active != c;
2588 }
2589 
2590 /*
2591  * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2592  * agfl content cannot be trusted. Warn the user that a repair is required to
2593  * recover leaked blocks.
2594  *
2595  * The purpose of this mechanism is to handle filesystems affected by the agfl
2596  * header padding mismatch problem. A reset keeps the filesystem online with a
2597  * relatively minor free space accounting inconsistency rather than suffer the
2598  * inevitable crash from use of an invalid agfl block.
2599  */
2600 static void
2601 xfs_agfl_reset(
2602 	struct xfs_trans	*tp,
2603 	struct xfs_buf		*agbp,
2604 	struct xfs_perag	*pag)
2605 {
2606 	struct xfs_mount	*mp = tp->t_mountp;
2607 	struct xfs_agf		*agf = agbp->b_addr;
2608 
2609 	ASSERT(xfs_perag_agfl_needs_reset(pag));
2610 	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2611 
2612 	xfs_warn(mp,
2613 	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2614 	       "Please unmount and run xfs_repair.",
2615 	         pag->pag_agno, pag->pagf_flcount);
2616 
2617 	agf->agf_flfirst = 0;
2618 	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2619 	agf->agf_flcount = 0;
2620 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2621 				    XFS_AGF_FLCOUNT);
2622 
2623 	pag->pagf_flcount = 0;
2624 	clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
2625 }
2626 
2627 /*
2628  * Add the extent to the list of extents to be free at transaction end.
2629  * The list is maintained sorted (by block number).
2630  */
2631 static int
2632 xfs_defer_extent_free(
2633 	struct xfs_trans		*tp,
2634 	xfs_fsblock_t			bno,
2635 	xfs_filblks_t			len,
2636 	const struct xfs_owner_info	*oinfo,
2637 	enum xfs_ag_resv_type		type,
2638 	unsigned int			free_flags,
2639 	struct xfs_defer_pending	**dfpp)
2640 {
2641 	struct xfs_extent_free_item	*xefi;
2642 	struct xfs_mount		*mp = tp->t_mountp;
2643 
2644 	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2645 	ASSERT(!isnullstartblock(bno));
2646 	ASSERT(!(free_flags & ~XFS_FREE_EXTENT_ALL_FLAGS));
2647 
2648 	if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
2649 		return -EFSCORRUPTED;
2650 
2651 	xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2652 			       GFP_KERNEL | __GFP_NOFAIL);
2653 	xefi->xefi_startblock = bno;
2654 	xefi->xefi_blockcount = (xfs_extlen_t)len;
2655 	xefi->xefi_agresv = type;
2656 	if (free_flags & XFS_FREE_EXTENT_SKIP_DISCARD)
2657 		xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2658 	if (oinfo) {
2659 		ASSERT(oinfo->oi_offset == 0);
2660 
2661 		if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2662 			xefi->xefi_flags |= XFS_EFI_ATTR_FORK;
2663 		if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2664 			xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2665 		xefi->xefi_owner = oinfo->oi_owner;
2666 	} else {
2667 		xefi->xefi_owner = XFS_RMAP_OWN_NULL;
2668 	}
2669 
2670 	xfs_extent_free_defer_add(tp, xefi, dfpp);
2671 	return 0;
2672 }
2673 
2674 int
2675 xfs_free_extent_later(
2676 	struct xfs_trans		*tp,
2677 	xfs_fsblock_t			bno,
2678 	xfs_filblks_t			len,
2679 	const struct xfs_owner_info	*oinfo,
2680 	enum xfs_ag_resv_type		type,
2681 	unsigned int			free_flags)
2682 {
2683 	struct xfs_defer_pending	*dontcare = NULL;
2684 
2685 	return xfs_defer_extent_free(tp, bno, len, oinfo, type, free_flags,
2686 			&dontcare);
2687 }
2688 
2689 /*
2690  * Set up automatic freeing of unwritten space in the filesystem.
2691  *
2692  * This function attaches a paused deferred extent free item to the
2693  * transaction.  Pausing means that the EFI will be logged in the next
2694  * transaction commit, but the pending EFI will not be finished until the
2695  * pending item is unpaused.
2696  *
2697  * If the system goes down after the EFI has been persisted to the log but
2698  * before the pending item is unpaused, log recovery will find the EFI, fail to
2699  * find the EFD, and free the space.
2700  *
2701  * If the pending item is unpaused, the next transaction commit will log an EFD
2702  * without freeing the space.
2703  *
2704  * Caller must ensure that the tp, fsbno, len, oinfo, and resv flags of the
2705  * @args structure are set to the relevant values.
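 *
 * A sketch of the intended lifecycle (summarising the comments above and
 * below):
 *
 *   xfs_alloc_schedule_autoreap()   attach a paused EFI to the transaction
 *   ...map the new space into the ondisk filesystem...
 *   xfs_alloc_cancel_autoreap()     keep the space: stale the EFI, log an EFD
 * or
 *   xfs_alloc_commit_autoreap()     none of the space used: unpause, free it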
2706  */
2707 int
2708 xfs_alloc_schedule_autoreap(
2709 	const struct xfs_alloc_arg	*args,
2710 	unsigned int			free_flags,
2711 	struct xfs_alloc_autoreap	*aarp)
2712 {
2713 	int				error;
2714 
2715 	error = xfs_defer_extent_free(args->tp, args->fsbno, args->len,
2716 			&args->oinfo, args->resv, free_flags, &aarp->dfp);
2717 	if (error)
2718 		return error;
2719 
2720 	xfs_defer_item_pause(args->tp, aarp->dfp);
2721 	return 0;
2722 }
2723 
2724 /*
2725  * Cancel automatic freeing of unwritten space in the filesystem.
2726  *
2727  * Earlier, we created a paused deferred extent free item and attached it to
2728  * this transaction so that we could automatically roll back a new space
2729  * allocation if the system went down.  Now we want to cancel the paused work
2730  * item by marking the EFI stale so we don't actually free the space, unpausing
2731  * the pending item and logging an EFD.
2732  *
2733  * The caller generally should have already mapped the space into the ondisk
2734  * filesystem.  If the reserved space was partially used, the caller must call
2735  * xfs_free_extent_later to create a new EFI to free the unused space.
2736  */
2737 void
2738 xfs_alloc_cancel_autoreap(
2739 	struct xfs_trans		*tp,
2740 	struct xfs_alloc_autoreap	*aarp)
2741 {
2742 	struct xfs_defer_pending	*dfp = aarp->dfp;
2743 	struct xfs_extent_free_item	*xefi;
2744 
2745 	if (!dfp)
2746 		return;
2747 
2748 	list_for_each_entry(xefi, &dfp->dfp_work, xefi_list)
2749 		xefi->xefi_flags |= XFS_EFI_CANCELLED;
2750 
2751 	xfs_defer_item_unpause(tp, dfp);
2752 }
2753 
2754 /*
2755  * Commit automatic freeing of unwritten space in the filesystem.
2756  *
2757  * This unpauses an earlier _schedule_autoreap and commits to freeing the
2758  * allocated space.  Call this if none of the reserved space was used.
2759  */
2760 void
2761 xfs_alloc_commit_autoreap(
2762 	struct xfs_trans		*tp,
2763 	struct xfs_alloc_autoreap	*aarp)
2764 {
2765 	if (aarp->dfp)
2766 		xfs_defer_item_unpause(tp, aarp->dfp);
2767 }
2768 
2769 #ifdef DEBUG
2770 /*
2771  * Check if an AGF has a free extent record whose length is equal to
2772  * args->minlen.
2773  */
2774 STATIC int
2775 xfs_exact_minlen_extent_available(
2776 	struct xfs_alloc_arg	*args,
2777 	struct xfs_buf		*agbp,
2778 	int			*stat)
2779 {
2780 	struct xfs_btree_cur	*cnt_cur;
2781 	xfs_agblock_t		fbno;
2782 	xfs_extlen_t		flen;
2783 	int			error = 0;
2784 
2785 	cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, agbp,
2786 					args->pag);
2787 	error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2788 	if (error)
2789 		goto out;
2790 
2791 	if (*stat == 0) {
2792 		xfs_btree_mark_sick(cnt_cur);
2793 		error = -EFSCORRUPTED;
2794 		goto out;
2795 	}
2796 
2797 	error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2798 	if (error)
2799 		goto out;
2800 
2801 	if (*stat == 1 && flen != args->minlen)
2802 		*stat = 0;
2803 
2804 out:
2805 	xfs_btree_del_cursor(cnt_cur, error);
2806 
2807 	return error;
2808 }
2809 #endif
2810 
2811 /*
2812  * Decide whether to use this allocation group for this allocation.
2813  * If so, fix up the btree freelist's size.
2814  */
2815 int			/* error */
2816 xfs_alloc_fix_freelist(
2817 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
2818 	uint32_t		alloc_flags)
2819 {
2820 	struct xfs_mount	*mp = args->mp;
2821 	struct xfs_perag	*pag = args->pag;
2822 	struct xfs_trans	*tp = args->tp;
2823 	struct xfs_buf		*agbp = NULL;
2824 	struct xfs_buf		*agflbp = NULL;
2825 	struct xfs_alloc_arg	targs;	/* local allocation arguments */
2826 	xfs_agblock_t		bno;	/* freelist block */
2827 	xfs_extlen_t		need;	/* total blocks needed in freelist */
2828 	int			error = 0;
2829 
2830 	/* deferred ops (AGFL block frees) require permanent transactions */
2831 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2832 
2833 	if (!xfs_perag_initialised_agf(pag)) {
2834 		error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2835 		if (error) {
2836 			/* Couldn't lock the AGF so skip this AG. */
2837 			if (error == -EAGAIN)
2838 				error = 0;
2839 			goto out_no_agbp;
2840 		}
2841 	}
2842 
2843 	/*
2844 	 * If this is a metadata preferred pag and we are allocating user
2845 	 * data, try somewhere else unless we are being asked to try harder
2846 	 * at this point.
2847 	 */
2848 	if (xfs_perag_prefers_metadata(pag) &&
2849 	    (args->datatype & XFS_ALLOC_USERDATA) &&
2850 	    (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2851 		ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
2852 		goto out_agbp_relse;
2853 	}
2854 
2855 	need = xfs_alloc_min_freelist(mp, pag);
2856 	if (!xfs_alloc_space_available(args, need, alloc_flags |
2857 			XFS_ALLOC_FLAG_CHECK))
2858 		goto out_agbp_relse;
2859 
2860 	/*
2861 	 * Get the a.g. freespace buffer.
2862 	 * Can fail if we're not blocking on locks, and it's held.
2863 	 */
2864 	if (!agbp) {
2865 		error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2866 		if (error) {
2867 			/* Couldn't lock the AGF so skip this AG. */
2868 			if (error == -EAGAIN)
2869 				error = 0;
2870 			goto out_no_agbp;
2871 		}
2872 	}
2873 
2874 	/* reset a padding mismatched agfl before final free space check */
2875 	if (xfs_perag_agfl_needs_reset(pag))
2876 		xfs_agfl_reset(tp, agbp, pag);
2877 
2878 	/* Reject if there isn't enough total space or a large enough single extent. */
2879 	need = xfs_alloc_min_freelist(mp, pag);
2880 	if (!xfs_alloc_space_available(args, need, alloc_flags))
2881 		goto out_agbp_relse;
2882 
2883 #ifdef DEBUG
2884 	if (args->alloc_minlen_only) {
2885 		int stat;
2886 
2887 		error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2888 		if (error || !stat)
2889 			goto out_agbp_relse;
2890 	}
2891 #endif
2892 	/*
2893 	 * Make the freelist shorter if it's too long.
2894 	 *
2895 	 * Note that from this point onwards, we will always release the agf and
2896 	 * agfl buffers on error. This handles the case where we error out and
2897 	 * the buffers are clean or may not have been joined to the transaction
2898 	 * and hence need to be released manually. If they have been joined to
2899 	 * the transaction, then xfs_trans_brelse() will handle them
2900 	 * appropriately based on the recursion count and dirty state of the
2901 	 * buffer.
2902 	 *
2903 	 * XXX (dgc): When we have lots of free space, does this buy us
2904 	 * anything other than extra overhead when we need to put more blocks
2905 	 * back on the free list? Maybe we should only do this when space is
2906 	 * getting low or the AGFL is more than half full?
2907 	 *
2908 	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2909 	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2910 	 * updating the rmapbt.  Both flags are used in xfs_repair while we're
2911 	 * rebuilding the rmapbt, and neither are used by the kernel.  They're
2912 	 * both required to ensure that rmaps are correctly recorded for the
2913 	 * regenerated AGFL, bnobt, and cntbt.  See repair/phase5.c and
2914 	 * repair/rmap.c in xfsprogs for details.
2915 	 */
2916 	memset(&targs, 0, sizeof(targs));
2917 	/* struct copy below */
2918 	if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
2919 		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2920 	else
2921 		targs.oinfo = XFS_RMAP_OINFO_AG;
2922 	while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
2923 			pag->pagf_flcount > need) {
2924 		error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
2925 		if (error)
2926 			goto out_agbp_relse;
2927 
2928 		/*
2929 		 * Defer the AGFL block free.
2930 		 *
2931 		 * This helps to prevent log reservation overruns due to too
2932 		 * many allocation operations in a transaction. AGFL frees are
2933 		 * prone to this problem because, for one, they are always freed
2934 		 * one at a time.  Further, an immediate AGFL block free can
2935 		 * cause a btree join and require another block free before the
2936 		 * real allocation can proceed.
2937 		 * Deferring the free disconnects freeing up the AGFL slot from
2938 		 * freeing the block.
2939 		 */
2940 		error = xfs_free_extent_later(tp,
2941 				XFS_AGB_TO_FSB(mp, args->agno, bno), 1,
2942 				&targs.oinfo, XFS_AG_RESV_AGFL, 0);
2943 		if (error)
2944 			goto out_agbp_relse;
2945 	}
2946 
2947 	targs.tp = tp;
2948 	targs.mp = mp;
2949 	targs.agbp = agbp;
2950 	targs.agno = args->agno;
2951 	targs.alignment = targs.minlen = targs.prod = 1;
2952 	targs.pag = pag;
2953 	error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2954 	if (error)
2955 		goto out_agbp_relse;
2956 
2957 	/* Make the freelist longer if it's too short. */
2958 	while (pag->pagf_flcount < need) {
2959 		targs.agbno = 0;
2960 		targs.maxlen = need - pag->pagf_flcount;
2961 		targs.resv = XFS_AG_RESV_AGFL;
2962 
2963 		/* Allocate as many blocks as possible at once. */
2964 		error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
2965 		if (error)
2966 			goto out_agflbp_relse;
2967 
2968 		/*
2969 		 * Stop if we run out.  Won't happen if callers are obeying
2970 		 * the restrictions correctly.  Can happen for free calls
2971 		 * on a completely full ag.
2972 		 */
2973 		if (targs.agbno == NULLAGBLOCK) {
2974 			if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
2975 				break;
2976 			goto out_agflbp_relse;
2977 		}
2978 
2979 		if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) {
2980 			error = xfs_rmap_alloc(tp, agbp, pag,
2981 				       targs.agbno, targs.len, &targs.oinfo);
2982 			if (error)
2983 				goto out_agflbp_relse;
2984 		}
2985 		error = xfs_alloc_update_counters(tp, agbp,
2986 						  -((long)(targs.len)));
2987 		if (error)
2988 			goto out_agflbp_relse;
2989 
2990 		/*
2991 		 * Put each allocated block on the list.
2992 		 */
2993 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2994 			error = xfs_alloc_put_freelist(pag, tp, agbp,
2995 							agflbp, bno, 0);
2996 			if (error)
2997 				goto out_agflbp_relse;
2998 		}
2999 	}
3000 	xfs_trans_brelse(tp, agflbp);
3001 	args->agbp = agbp;
3002 	return 0;
3003 
3004 out_agflbp_relse:
3005 	xfs_trans_brelse(tp, agflbp);
3006 out_agbp_relse:
3007 	if (agbp)
3008 		xfs_trans_brelse(tp, agbp);
3009 out_no_agbp:
3010 	args->agbp = NULL;
3011 	return error;
3012 }
3013 
3014 /*
3015  * Get a block from the freelist.
3016  * Returns with the buffer for the block gotten.
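 *
 * The AGFL is used as a ring buffer: this consumer advances agf_flfirst
 * (wrapping to 0 at xfs_agfl_size(mp)), while xfs_alloc_put_freelist()
 * advances agf_fllast the same way when returning a block.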
3017  */
3018 int
3019 xfs_alloc_get_freelist(
3020 	struct xfs_perag	*pag,
3021 	struct xfs_trans	*tp,
3022 	struct xfs_buf		*agbp,
3023 	xfs_agblock_t		*bnop,
3024 	int			btreeblk)
3025 {
3026 	struct xfs_agf		*agf = agbp->b_addr;
3027 	struct xfs_buf		*agflbp;
3028 	xfs_agblock_t		bno;
3029 	__be32			*agfl_bno;
3030 	int			error;
3031 	uint32_t		logflags;
3032 	struct xfs_mount	*mp = tp->t_mountp;
3033 
3034 	/*
3035 	 * Freelist is empty, give up.
3036 	 */
3037 	if (!agf->agf_flcount) {
3038 		*bnop = NULLAGBLOCK;
3039 		return 0;
3040 	}
3041 	/*
3042 	 * Read the array of free blocks.
3043 	 */
3044 	error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3045 	if (error)
3046 		return error;
3047 
3048 
3049 	/*
3050 	 * Get the block number and update the data structures.
3051 	 */
3052 	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3053 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
3054 	if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
3055 		return -EFSCORRUPTED;
3056 
3057 	be32_add_cpu(&agf->agf_flfirst, 1);
3058 	xfs_trans_brelse(tp, agflbp);
3059 	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
3060 		agf->agf_flfirst = 0;
3061 
3062 	ASSERT(!xfs_perag_agfl_needs_reset(pag));
3063 	be32_add_cpu(&agf->agf_flcount, -1);
3064 	pag->pagf_flcount--;
3065 
3066 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
3067 	if (btreeblk) {
3068 		be32_add_cpu(&agf->agf_btreeblks, 1);
3069 		pag->pagf_btreeblks++;
3070 		logflags |= XFS_AGF_BTREEBLKS;
3071 	}
3072 
3073 	xfs_alloc_log_agf(tp, agbp, logflags);
3074 	*bnop = bno;
3075 
3076 	return 0;
3077 }
3078 
3079 /*
3080  * Log the given fields from the agf structure.
3081  */
3082 void
3083 xfs_alloc_log_agf(
3084 	struct xfs_trans	*tp,
3085 	struct xfs_buf		*bp,
3086 	uint32_t		fields)
3087 {
3088 	int	first;		/* first byte offset */
3089 	int	last;		/* last byte offset */
3090 	static const short	offsets[] = {
3091 		offsetof(xfs_agf_t, agf_magicnum),
3092 		offsetof(xfs_agf_t, agf_versionnum),
3093 		offsetof(xfs_agf_t, agf_seqno),
3094 		offsetof(xfs_agf_t, agf_length),
3095 		offsetof(xfs_agf_t, agf_bno_root),   /* also cnt/rmap root */
3096 		offsetof(xfs_agf_t, agf_bno_level),  /* also cnt/rmap levels */
3097 		offsetof(xfs_agf_t, agf_flfirst),
3098 		offsetof(xfs_agf_t, agf_fllast),
3099 		offsetof(xfs_agf_t, agf_flcount),
3100 		offsetof(xfs_agf_t, agf_freeblks),
3101 		offsetof(xfs_agf_t, agf_longest),
3102 		offsetof(xfs_agf_t, agf_btreeblks),
3103 		offsetof(xfs_agf_t, agf_uuid),
3104 		offsetof(xfs_agf_t, agf_rmap_blocks),
3105 		offsetof(xfs_agf_t, agf_refcount_blocks),
3106 		offsetof(xfs_agf_t, agf_refcount_root),
3107 		offsetof(xfs_agf_t, agf_refcount_level),
3108 		/* needed so that we don't log the whole rest of the structure: */
3109 		offsetof(xfs_agf_t, agf_spare64),
3110 		sizeof(xfs_agf_t)
3111 	};
3112 
3113 	trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
3114 
3115 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
3116 
3117 	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
3118 	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
3119 }
3120 
3121 /*
3122  * Put the block on the freelist for the allocation group.
3123  */
3124 int
3125 xfs_alloc_put_freelist(
3126 	struct xfs_perag	*pag,
3127 	struct xfs_trans	*tp,
3128 	struct xfs_buf		*agbp,
3129 	struct xfs_buf		*agflbp,
3130 	xfs_agblock_t		bno,
3131 	int			btreeblk)
3132 {
3133 	struct xfs_mount	*mp = tp->t_mountp;
3134 	struct xfs_agf		*agf = agbp->b_addr;
3135 	__be32			*blockp;
3136 	int			error;
3137 	uint32_t		logflags;
3138 	__be32			*agfl_bno;
3139 	int			startoff;
3140 
3141 	if (!agflbp) {
3142 		error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3143 		if (error)
3144 			return error;
3145 	}
3146 
3147 	be32_add_cpu(&agf->agf_fllast, 1);
3148 	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
3149 		agf->agf_fllast = 0;
3150 
3151 	ASSERT(!xfs_perag_agfl_needs_reset(pag));
3152 	be32_add_cpu(&agf->agf_flcount, 1);
3153 	pag->pagf_flcount++;
3154 
3155 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
3156 	if (btreeblk) {
3157 		be32_add_cpu(&agf->agf_btreeblks, -1);
3158 		pag->pagf_btreeblks--;
3159 		logflags |= XFS_AGF_BTREEBLKS;
3160 	}
3161 
3162 	xfs_alloc_log_agf(tp, agbp, logflags);
3163 
3164 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
3165 
3166 	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3167 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
3168 	*blockp = cpu_to_be32(bno);
3169 	startoff = (char *)blockp - (char *)agflbp->b_addr;
3170 
3171 	xfs_alloc_log_agf(tp, agbp, logflags);
3172 
3173 	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
3174 	xfs_trans_log_buf(tp, agflbp, startoff,
3175 			  startoff + sizeof(xfs_agblock_t) - 1);
3176 	return 0;
3177 }
3178 
3179 /*
3180  * Check that this AGF/AGI header's sequence number and length matches the AG
3181  * number and size in fsblocks.
3182  */
3183 xfs_failaddr_t
3184 xfs_validate_ag_length(
3185 	struct xfs_buf		*bp,
3186 	uint32_t		seqno,
3187 	uint32_t		length)
3188 {
3189 	struct xfs_mount	*mp = bp->b_mount;
3190 	/*
3191 	 * During growfs operations, the perag is not fully initialised,
3192 	 * so we can't use it for any useful checking. growfs ensures we can't
3193 	 * use it by using uncached buffers that don't have the perag attached
3194 	 * so we can detect and avoid this problem.
3195 	 */
3196 	if (bp->b_pag && seqno != bp->b_pag->pag_agno)
3197 		return __this_address;
3198 
3199 	/*
3200 	 * Only the last AG in the filesystem is allowed to be shorter
3201 	 * than the AG size recorded in the superblock.
3202 	 */
3203 	if (length != mp->m_sb.sb_agblocks) {
3204 		/*
3205 		 * During growfs, the new last AG can get here before we
3206 		 * have updated the superblock. Give it a pass on the seqno
3207 		 * check.
3208 		 */
3209 		if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
3210 			return __this_address;
3211 		if (length < XFS_MIN_AG_BLOCKS)
3212 			return __this_address;
3213 		if (length > mp->m_sb.sb_agblocks)
3214 			return __this_address;
3215 	}
3216 
3217 	return NULL;
3218 }
3219 
3220 /*
3221  * Verify the AGF is consistent.
3222  *
3223  * We do not verify the AGFL indexes in the AGF are fully consistent here
3224  * because of issues with variable on-disk structure sizes. Instead, we check
3225  * the agfl indexes for consistency when we initialise the perag from the AGF
3226  * information after a read completes.
3227  *
3228  * If the index is inconsistent, then we mark the perag as needing an AGFL
3229  * reset. The first AGFL update performed then resets the AGFL indexes and
3230  * refills the AGFL with known good free blocks, allowing the filesystem to
3231  * continue operating normally at the cost of a few leaked free space blocks.
3232  */
3233 static xfs_failaddr_t
3234 xfs_agf_verify(
3235 	struct xfs_buf		*bp)
3236 {
3237 	struct xfs_mount	*mp = bp->b_mount;
3238 	struct xfs_agf		*agf = bp->b_addr;
3239 	xfs_failaddr_t		fa;
3240 	uint32_t		agf_seqno = be32_to_cpu(agf->agf_seqno);
3241 	uint32_t		agf_length = be32_to_cpu(agf->agf_length);
3242 
3243 	if (xfs_has_crc(mp)) {
3244 		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
3245 			return __this_address;
3246 		if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
3247 			return __this_address;
3248 	}
3249 
3250 	if (!xfs_verify_magic(bp, agf->agf_magicnum))
3251 		return __this_address;
3252 
3253 	if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
3254 		return __this_address;
3255 
3256 	/*
3257 	 * Both agf_seqno and agf_length need to be validated before anything
3258 	 * else that is block number related in the AGF or AGFL can be checked.
3259 	 */
3260 	fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
3261 	if (fa)
3262 		return fa;
3263 
3264 	if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
3265 		return __this_address;
3266 	if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
3267 		return __this_address;
3268 	if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
3269 		return __this_address;
3270 
3271 	if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3272 	    be32_to_cpu(agf->agf_freeblks) > agf_length)
3273 		return __this_address;
3274 
3275 	if (be32_to_cpu(agf->agf_bno_level) < 1 ||
3276 	    be32_to_cpu(agf->agf_cnt_level) < 1 ||
3277 	    be32_to_cpu(agf->agf_bno_level) > mp->m_alloc_maxlevels ||
3278 	    be32_to_cpu(agf->agf_cnt_level) > mp->m_alloc_maxlevels)
3279 		return __this_address;
3280 
3281 	if (xfs_has_lazysbcount(mp) &&
3282 	    be32_to_cpu(agf->agf_btreeblks) > agf_length)
3283 		return __this_address;
3284 
3285 	if (xfs_has_rmapbt(mp)) {
3286 		if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
3287 			return __this_address;
3288 
3289 		if (be32_to_cpu(agf->agf_rmap_level) < 1 ||
3290 		    be32_to_cpu(agf->agf_rmap_level) > mp->m_rmap_maxlevels)
3291 			return __this_address;
3292 	}
3293 
3294 	if (xfs_has_reflink(mp)) {
3295 		if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
3296 			return __this_address;
3297 
3298 		if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3299 		    be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
3300 			return __this_address;
3301 	}
3302 
3303 	return NULL;
3304 }
3305 
3306 static void
3307 xfs_agf_read_verify(
3308 	struct xfs_buf	*bp)
3309 {
3310 	struct xfs_mount *mp = bp->b_mount;
3311 	xfs_failaddr_t	fa;
3312 
3313 	if (xfs_has_crc(mp) &&
3314 	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3315 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3316 	else {
3317 		fa = xfs_agf_verify(bp);
3318 		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3319 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3320 	}
3321 }
3322 
3323 static void
3324 xfs_agf_write_verify(
3325 	struct xfs_buf	*bp)
3326 {
3327 	struct xfs_mount	*mp = bp->b_mount;
3328 	struct xfs_buf_log_item	*bip = bp->b_log_item;
3329 	struct xfs_agf		*agf = bp->b_addr;
3330 	xfs_failaddr_t		fa;
3331 
3332 	fa = xfs_agf_verify(bp);
3333 	if (fa) {
3334 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3335 		return;
3336 	}
3337 
3338 	if (!xfs_has_crc(mp))
3339 		return;
3340 
3341 	if (bip)
3342 		agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3343 
3344 	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3345 }
3346 
3347 const struct xfs_buf_ops xfs_agf_buf_ops = {
3348 	.name = "xfs_agf",
3349 	.magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3350 	.verify_read = xfs_agf_read_verify,
3351 	.verify_write = xfs_agf_write_verify,
3352 	.verify_struct = xfs_agf_verify,
3353 };
3354 
3355 /*
3356  * Read in the allocation group header (free/alloc section).
3357  */
3358 int
3359 xfs_read_agf(
3360 	struct xfs_perag	*pag,
3361 	struct xfs_trans	*tp,
3362 	int			flags,
3363 	struct xfs_buf		**agfbpp)
3364 {
3365 	struct xfs_mount	*mp = pag->pag_mount;
3366 	int			error;
3367 
3368 	trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
3369 
3370 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3371 			XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
3372 			XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3373 	if (xfs_metadata_is_sick(error))
3374 		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
3375 	if (error)
3376 		return error;
3377 
3378 	xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3379 	return 0;
3380 }
3381 
3382 /*
3383  * Read in the allocation group header (free/alloc section) and initialise the
3384  * perag structure if necessary. If the caller provides @agfbpp, then return the
3385  * locked buffer to the caller, otherwise free it.
3386  */
3387 int
3388 xfs_alloc_read_agf(
3389 	struct xfs_perag	*pag,
3390 	struct xfs_trans	*tp,
3391 	int			flags,
3392 	struct xfs_buf		**agfbpp)
3393 {
3394 	struct xfs_buf		*agfbp;
3395 	struct xfs_agf		*agf;
3396 	int			error;
3397 	int			allocbt_blks;
3398 
3399 	trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
3400 
3401 	/* We don't support trylock when freeing. */
3402 	ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3403 			(XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3404 	error = xfs_read_agf(pag, tp,
3405 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3406 			&agfbp);
3407 	if (error)
3408 		return error;
3409 
3410 	agf = agfbp->b_addr;
3411 	if (!xfs_perag_initialised_agf(pag)) {
3412 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3413 		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3414 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3415 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3416 		pag->pagf_bno_level = be32_to_cpu(agf->agf_bno_level);
3417 		pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level);
3418 		pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level);
3419 		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3420 		if (xfs_agfl_needs_reset(pag->pag_mount, agf))
3421 			set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3422 		else
3423 			clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3424 
3425 		/*
3426 		 * Update the in-core allocbt counter. Filter out the rmapbt
3427 		 * subset of the btreeblks counter because the rmapbt is managed
3428 		 * by perag reservation. Subtract one for the rmapbt root block
3429 		 * because the rmap counter includes it while the btreeblks
3430 		 * counter only tracks non-root blocks.
3431 		 */
3432 		allocbt_blks = pag->pagf_btreeblks;
3433 		if (xfs_has_rmapbt(pag->pag_mount))
3434 			allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3435 		if (allocbt_blks > 0)
3436 			atomic64_add(allocbt_blks,
3437 					&pag->pag_mount->m_allocbt_blks);
3438 
3439 		set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
3440 	}
3441 #ifdef DEBUG
3442 	else if (!xfs_is_shutdown(pag->pag_mount)) {
3443 		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3444 		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3445 		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3446 		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3447 		ASSERT(pag->pagf_bno_level == be32_to_cpu(agf->agf_bno_level));
3448 		ASSERT(pag->pagf_cnt_level == be32_to_cpu(agf->agf_cnt_level));
3449 	}
3450 #endif
3451 	if (agfbpp)
3452 		*agfbpp = agfbp;
3453 	else
3454 		xfs_trans_brelse(tp, agfbp);
3455 	return 0;
3456 }
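
/*
 * Illustrative usage (editor addition): a caller that only wants the in-core
 * pagf_* counters primed can pass a NULL @agfbpp, in which case the function
 * releases the AGF buffer itself, as the code above shows. The helper name is
 * hypothetical.
 */
static inline int
example_prime_pagf_counters(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp)
{
	return xfs_alloc_read_agf(pag, tp, 0, NULL);
}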
3457 
3458 /*
3459  * Pre-process allocation arguments to set initial state that we don't require
3460  * callers to set up correctly, as well as bounds check the allocation args
3461  * that are set up.
3462  */
3463 static int
3464 xfs_alloc_vextent_check_args(
3465 	struct xfs_alloc_arg	*args,
3466 	xfs_fsblock_t		target,
3467 	xfs_agnumber_t		*minimum_agno)
3468 {
3469 	struct xfs_mount	*mp = args->mp;
3470 	xfs_agblock_t		agsize;
3471 
3472 	args->fsbno = NULLFSBLOCK;
3473 
3474 	*minimum_agno = 0;
3475 	if (args->tp->t_highest_agno != NULLAGNUMBER)
3476 		*minimum_agno = args->tp->t_highest_agno;
3477 
3478 	/*
3479 	 * Just fix this up, for the case where the last a.g. is shorter
3480 	 * (or there's only one a.g.) and the caller couldn't easily figure
3481 	 * that out (xfs_bmap_alloc).
3482 	 */
3483 	agsize = mp->m_sb.sb_agblocks;
3484 	if (args->maxlen > agsize)
3485 		args->maxlen = agsize;
3486 	if (args->alignment == 0)
3487 		args->alignment = 1;
3488 
3489 	ASSERT(args->minlen > 0);
3490 	ASSERT(args->maxlen > 0);
3491 	ASSERT(args->alignment > 0);
3492 	ASSERT(args->resv != XFS_AG_RESV_AGFL);
3493 
3494 	ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount);
3495 	ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize);
3496 	ASSERT(args->minlen <= args->maxlen);
3497 	ASSERT(args->minlen <= agsize);
3498 	ASSERT(args->mod < args->prod);
3499 
3500 	if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount ||
3501 	    XFS_FSB_TO_AGBNO(mp, target) >= agsize ||
3502 	    args->minlen > args->maxlen || args->minlen > agsize ||
3503 	    args->mod >= args->prod) {
3504 		trace_xfs_alloc_vextent_badargs(args);
3505 		return -ENOSPC;
3506 	}
3507 
3508 	if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
3509 		trace_xfs_alloc_vextent_skip_deadlock(args);
3510 		return -ENOSPC;
3511 	}
3512 	return 0;
3514 }
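
/*
 * Illustrative sketch (editor addition): the minimal argument state a caller
 * must set up to satisfy the assertions and bounds checks above. All field
 * values here are examples, not requirements of any particular caller, and
 * the helper name is hypothetical.
 */
static inline void
example_init_alloc_args(
	struct xfs_alloc_arg	*args,
	struct xfs_trans	*tp,
	struct xfs_mount	*mp)
{
	memset(args, 0, sizeof(*args));
	args->tp = tp;
	args->mp = mp;
	args->minlen = 1;	/* minlen must be > 0 and <= maxlen */
	args->maxlen = 16;	/* clamped to the AG size by the checks above */
	args->prod = 1;		/* with mod = 0, satisfies mod < prod */
	args->alignment = 1;	/* zero would be fixed up to 1 anyway */
	args->oinfo = XFS_RMAP_OINFO_ANY_OWNER;
	args->resv = XFS_AG_RESV_NONE;
}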
3515 
3516 /*
3517  * Prepare an AG for allocation. If the AG is not prepared to accept the
3518  * allocation, return failure.
3519  *
3520  * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
3521  * modified to hold their own perag references.
3522  */
3523 static int
3524 xfs_alloc_vextent_prepare_ag(
3525 	struct xfs_alloc_arg	*args,
3526 	uint32_t		alloc_flags)
3527 {
3528 	bool			need_pag = !args->pag;
3529 	int			error;
3530 
3531 	if (need_pag)
3532 		args->pag = xfs_perag_get(args->mp, args->agno);
3533 
3534 	args->agbp = NULL;
3535 	error = xfs_alloc_fix_freelist(args, alloc_flags);
3536 	if (error) {
3537 		trace_xfs_alloc_vextent_nofix(args);
3538 		if (need_pag)
3539 			xfs_perag_put(args->pag);
3540 		args->agbno = NULLAGBLOCK;
3541 		return error;
3542 	}
3543 	if (!args->agbp) {
3544 		/* cannot allocate in this AG at all */
3545 		trace_xfs_alloc_vextent_noagbp(args);
3546 		args->agbno = NULLAGBLOCK;
3547 		return 0;
3548 	}
3549 	args->wasfromfl = 0;
3550 	return 0;
3551 }
3552 
3553 /*
3554  * Post-process allocation results to account for the allocation if it succeeded
3555  * and set the allocated block number correctly for the caller.
3556  *
3557  * XXX: we should really be returning ENOSPC for ENOSPC, not
3558  * hiding it behind a "successful" NULLFSBLOCK allocation.
3559  */
3560 static int
3561 xfs_alloc_vextent_finish(
3562 	struct xfs_alloc_arg	*args,
3563 	xfs_agnumber_t		minimum_agno,
3564 	int			alloc_error,
3565 	bool			drop_perag)
3566 {
3567 	struct xfs_mount	*mp = args->mp;
3568 	int			error = 0;
3569 
3570 	/*
3571 	 * We can end up here with a locked AGF. If we failed, the caller is
3572 	 * likely going to try to allocate again with different parameters, and
3573 	 * that can widen the AGs that are searched for free space. If we have
3574 	 * to do BMBT block allocation, we have to do a new allocation.
3575 	 *
3576 	 * Hence leaving this function with the AGF locked opens up potential
3577 	 * ABBA AGF deadlocks because a future allocation attempt in this
3578 	 * transaction may attempt to lock a lower number AGF.
3579 	 *
3580 	 * We can't release the AGF until the transaction is committed, so at
3581 	 * this point we must update the "first allocation" tracker to point at
3582 	 * this AG if the tracker is empty or points to a lower AG. This allows
3583 	 * the next allocation attempt to be modified appropriately to avoid
3584 	 * deadlocks.
3585 	 */
3586 	if (args->agbp &&
3587 	    (args->tp->t_highest_agno == NULLAGNUMBER ||
3588 	     args->agno > minimum_agno))
3589 		args->tp->t_highest_agno = args->agno;
3590 
3591 	/*
3592 	 * If the allocation failed with an error or we had an ENOSPC result,
3593 	 * preserve the returned error whilst also marking the allocation result
3594 	 * as "no extent allocated". This ensures that callers that fail to
3595 	 * capture the error will still treat it as a failed allocation.
3596 	 */
3597 	if (alloc_error || args->agbno == NULLAGBLOCK) {
3598 		args->fsbno = NULLFSBLOCK;
3599 		error = alloc_error;
3600 		goto out_drop_perag;
3601 	}
3602 
3603 	args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3604 
3605 	ASSERT(args->len >= args->minlen);
3606 	ASSERT(args->len <= args->maxlen);
3607 	ASSERT(args->agbno % args->alignment == 0);
3608 	XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);
3609 
3610 	/* if not file data, insert new block into the reverse map btree */
3611 	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
3612 		error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
3613 				       args->agbno, args->len, &args->oinfo);
3614 		if (error)
3615 			goto out_drop_perag;
3616 	}
3617 
3618 	if (!args->wasfromfl) {
3619 		error = xfs_alloc_update_counters(args->tp, args->agbp,
3620 						  -((long)(args->len)));
3621 		if (error)
3622 			goto out_drop_perag;
3623 
3624 		ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
3625 				args->len));
3626 	}
3627 
3628 	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
3629 
3630 	XFS_STATS_INC(mp, xs_allocx);
3631 	XFS_STATS_ADD(mp, xs_allocb, args->len);
3632 
3633 	trace_xfs_alloc_vextent_finish(args);
3634 
3635 out_drop_perag:
3636 	if (drop_perag && args->pag) {
3637 		xfs_perag_rele(args->pag);
3638 		args->pag = NULL;
3639 	}
3640 	return error;
3641 }
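
/*
 * Illustrative sketch (editor addition): the AGF lock-ordering rule that
 * t_highest_agno enforces. Once this transaction has locked an AGF, later
 * allocations in the same transaction may only lock that AG or a higher one;
 * this mirrors the skip_deadlock check in xfs_alloc_vextent_check_args().
 * The helper name is hypothetical.
 */
static inline bool
example_agno_safe_to_lock(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno)
{
	return tp->t_highest_agno == NULLAGNUMBER ||
	       agno >= tp->t_highest_agno;
}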
3642 
3643 /*
3644  * Allocate within a single AG only. This uses a best-fit length algorithm so if
3645  * you need an exact sized allocation without locality constraints, this is the
3646  * fastest way to do it.
3647  *
3648  * Caller is expected to hold a perag reference in args->pag.
3649  */
3650 int
3651 xfs_alloc_vextent_this_ag(
3652 	struct xfs_alloc_arg	*args,
3653 	xfs_agnumber_t		agno)
3654 {
3655 	struct xfs_mount	*mp = args->mp;
3656 	xfs_agnumber_t		minimum_agno;
3657 	uint32_t		alloc_flags = 0;
3658 	int			error;
3659 
3660 	ASSERT(args->pag != NULL);
3661 	ASSERT(args->pag->pag_agno == agno);
3662 
3663 	args->agno = agno;
3664 	args->agbno = 0;
3665 
3666 	trace_xfs_alloc_vextent_this_ag(args);
3667 
3668 	error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
3669 			&minimum_agno);
3670 	if (error) {
3671 		if (error == -ENOSPC)
3672 			return 0;
3673 		return error;
3674 	}
3675 
3676 	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3677 	if (!error && args->agbp)
3678 		error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3679 
3680 	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3681 }
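
/*
 * Illustrative usage (editor addition): a fixed-AG, best-fit allocation.
 * The perag reference is owned by the caller, per the comment above; the
 * wrapper name is hypothetical.
 */
static inline int
example_alloc_in_this_ag(
	struct xfs_alloc_arg	*args,
	struct xfs_perag	*pag)
{
	args->pag = pag;	/* reference remains owned by the caller */
	return xfs_alloc_vextent_this_ag(args, pag->pag_agno);
}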
3682 
3683 /*
3684  * Iterate all AGs trying to allocate an extent starting from @start_ag.
3685  *
3686  * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
3687  * allocation attempts in @start_agno have locality information. If we fail to
3688  * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
3689  * we attempt to allocation in as there is no locality optimisation possible for
3690  * those allocations.
3691  *
3692  * On return, args->pag may be left referenced if we finish before the "all
3693  * failed" return point. The allocation finish still needs the perag, and
3694  * so the caller will release it once they've finished the allocation.
3695  *
3696  * When we wrap the AG iteration at the end of the filesystem, we have to be
3697  * careful not to wrap into AGs below ones we already have locked in the
3698  * transaction if we are doing a blocking iteration. This will result in an
3699  * out-of-order locking of AGFs and hence can cause deadlocks.
3700  */
3701 static int
3702 xfs_alloc_vextent_iterate_ags(
3703 	struct xfs_alloc_arg	*args,
3704 	xfs_agnumber_t		minimum_agno,
3705 	xfs_agnumber_t		start_agno,
3706 	xfs_agblock_t		target_agbno,
3707 	uint32_t		alloc_flags)
3708 {
3709 	struct xfs_mount	*mp = args->mp;
3710 	xfs_agnumber_t		restart_agno = minimum_agno;
3711 	xfs_agnumber_t		agno;
3712 	int			error = 0;
3713 
3714 	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
3715 		restart_agno = 0;
3716 restart:
3717 	for_each_perag_wrap_range(mp, start_agno, restart_agno,
3718 			mp->m_sb.sb_agcount, agno, args->pag) {
3719 		args->agno = agno;
3720 		error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3721 		if (error)
3722 			break;
3723 		if (!args->agbp) {
3724 			trace_xfs_alloc_vextent_loopfailed(args);
3725 			continue;
3726 		}
3727 
3728 		/*
3729 		 * Allocation is supposed to succeed now, so break out of the
3730 		 * loop regardless of whether we succeed or not.
3731 		 */
3732 		if (args->agno == start_agno && target_agbno) {
3733 			args->agbno = target_agbno;
3734 			error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3735 		} else {
3736 			args->agbno = 0;
3737 			error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3738 		}
3739 		break;
3740 	}
3741 	if (error) {
3742 		xfs_perag_rele(args->pag);
3743 		args->pag = NULL;
3744 		return error;
3745 	}
3746 	if (args->agbp)
3747 		return 0;
3748 
3749 	/*
3750 	 * We didn't find an AG we can allocate from. If we were given
3751 	 * constraining flags by the caller, drop them and retry the allocation
3752 	 * without any constraints being set.
3753 	 */
3754 	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
3755 		alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
3756 		restart_agno = minimum_agno;
3757 		goto restart;
3758 	}
3759 
3760 	ASSERT(args->pag == NULL);
3761 	trace_xfs_alloc_vextent_allfailed(args);
3762 	return 0;
3763 }
3764 
3765 /*
3766  * Iterate the AGs from the start AG to the end of the filesystem, trying
3767  * to allocate blocks. It starts with a near allocation attempt in the initial
3768  * AG, then falls back to anywhere-in-ag after the first AG fails. It will wrap
3769  * back to zero if allowed by previous allocations in this transaction,
3770  * otherwise will wrap back to the start AG and run a second blocking pass to
3771  * the end of the filesystem.
3772  */
3773 int
3774 xfs_alloc_vextent_start_ag(
3775 	struct xfs_alloc_arg	*args,
3776 	xfs_fsblock_t		target)
3777 {
3778 	struct xfs_mount	*mp = args->mp;
3779 	xfs_agnumber_t		minimum_agno;
3780 	xfs_agnumber_t		start_agno;
3781 	xfs_agnumber_t		rotorstep = xfs_rotorstep;
3782 	bool			bump_rotor = false;
3783 	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3784 	int			error;
3785 
3786 	ASSERT(args->pag == NULL);
3787 
3788 	args->agno = NULLAGNUMBER;
3789 	args->agbno = NULLAGBLOCK;
3790 
3791 	trace_xfs_alloc_vextent_start_ag(args);
3792 
3793 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3794 	if (error) {
3795 		if (error == -ENOSPC)
3796 			return 0;
3797 		return error;
3798 	}
3799 
3800 	if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3801 	    xfs_is_inode32(mp)) {
3802 		target = XFS_AGB_TO_FSB(mp,
3803 				((mp->m_agfrotor / rotorstep) %
3804 				mp->m_sb.sb_agcount), 0);
3805 		bump_rotor = true;
3806 	}
3807 
3808 	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3809 	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3810 			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3811 
3812 	if (bump_rotor) {
3813 		if (args->agno == start_agno)
3814 			mp->m_agfrotor = (mp->m_agfrotor + 1) %
3815 				(mp->m_sb.sb_agcount * rotorstep);
3816 		else
3817 			mp->m_agfrotor = (args->agno * rotorstep + 1) %
3818 				(mp->m_sb.sb_agcount * rotorstep);
3819 	}
3820 
3821 	return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3822 }
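
/*
 * Illustrative sketch (editor addition): how the inode32 rotor above maps the
 * rotor counter to a starting AG. With sb_agcount = 4 and xfs_rotorstep = 2,
 * and assuming each allocation succeeds in its start AG, successive
 * initial-data allocations start in AGs 0, 0, 1, 1, 2, 2, 3, 3, 0, ...
 * The helper name is hypothetical.
 */
static inline xfs_agnumber_t
example_rotor_start_agno(
	struct xfs_mount	*mp,
	xfs_agnumber_t		rotorstep)
{
	return (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount;
}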
3823 
3824 /*
3825  * Iterate from the agno indicated via @target through to the end of the
3826  * filesystem attempting blocking allocation. This does not wrap or try a second
3827  * pass, so will not recurse into AGs lower than indicated by the target.
3828  */
3829 int
3830 xfs_alloc_vextent_first_ag(
3831 	struct xfs_alloc_arg	*args,
3832 	xfs_fsblock_t		target)
3833 {
3834 	struct xfs_mount	*mp = args->mp;
3835 	xfs_agnumber_t		minimum_agno;
3836 	xfs_agnumber_t		start_agno;
3837 	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3838 	int			error;
3839 
3840 	ASSERT(args->pag == NULL);
3841 
3842 	args->agno = NULLAGNUMBER;
3843 	args->agbno = NULLAGBLOCK;
3844 
3845 	trace_xfs_alloc_vextent_first_ag(args);
3846 
3847 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3848 	if (error) {
3849 		if (error == -ENOSPC)
3850 			return 0;
3851 		return error;
3852 	}
3853 
3854 	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3855 	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3856 			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3857 	return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3858 }
3859 
3860 /*
3861  * Allocate at the exact block target or fail. Caller is expected to hold a
3862  * perag reference in args->pag.
3863  */
3864 int
3865 xfs_alloc_vextent_exact_bno(
3866 	struct xfs_alloc_arg	*args,
3867 	xfs_fsblock_t		target)
3868 {
3869 	struct xfs_mount	*mp = args->mp;
3870 	xfs_agnumber_t		minimum_agno;
3871 	int			error;
3872 
3873 	ASSERT(args->pag != NULL);
3874 	ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3875 
3876 	args->agno = XFS_FSB_TO_AGNO(mp, target);
3877 	args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3878 
3879 	trace_xfs_alloc_vextent_exact_bno(args);
3880 
3881 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3882 	if (error) {
3883 		if (error == -ENOSPC)
3884 			return 0;
3885 		return error;
3886 	}
3887 
3888 	error = xfs_alloc_vextent_prepare_ag(args, 0);
3889 	if (!error && args->agbp)
3890 		error = xfs_alloc_ag_vextent_exact(args);
3891 
3892 	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3893 }
3894 
3895 /*
3896  * Allocate an extent as close to the target as possible. If there are no
3897  * viable candidates in the AG, then fail the allocation.
3898  *
3899  * Caller may or may not have a per-ag reference in args->pag.
3900  */
3901 int
3902 xfs_alloc_vextent_near_bno(
3903 	struct xfs_alloc_arg	*args,
3904 	xfs_fsblock_t		target)
3905 {
3906 	struct xfs_mount	*mp = args->mp;
3907 	xfs_agnumber_t		minimum_agno;
3908 	bool			needs_perag = args->pag == NULL;
3909 	uint32_t		alloc_flags = 0;
3910 	int			error;
3911 
3912 	if (!needs_perag)
3913 		ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3914 
3915 	args->agno = XFS_FSB_TO_AGNO(mp, target);
3916 	args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3917 
3918 	trace_xfs_alloc_vextent_near_bno(args);
3919 
3920 	error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3921 	if (error) {
3922 		if (error == -ENOSPC)
3923 			return 0;
3924 		return error;
3925 	}
3926 
3927 	if (needs_perag)
3928 		args->pag = xfs_perag_grab(mp, args->agno);
3929 
3930 	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3931 	if (!error && args->agbp)
3932 		error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3933 
3934 	return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
3935 }
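
/*
 * Illustrative usage (editor addition): a locality-seeking allocation without
 * a held perag reference; as the code above shows, the function grabs and
 * drops its own reference when args->pag is NULL. The wrapper name is
 * hypothetical.
 */
static inline int
example_alloc_near(
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		target)
{
	args->pag = NULL;	/* let xfs_alloc_vextent_near_bno() grab one */
	return xfs_alloc_vextent_near_bno(args, target);
}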
3936 
3937 /* Ensure that the freelist is at full capacity. */
3938 int
3939 xfs_free_extent_fix_freelist(
3940 	struct xfs_trans	*tp,
3941 	struct xfs_perag	*pag,
3942 	struct xfs_buf		**agbp)
3943 {
3944 	struct xfs_alloc_arg	args;
3945 	int			error;
3946 
3947 	memset(&args, 0, sizeof(struct xfs_alloc_arg));
3948 	args.tp = tp;
3949 	args.mp = tp->t_mountp;
3950 	args.agno = pag->pag_agno;
3951 	args.pag = pag;
3952 
3953 	/*
3954 	 * validate that the block number is legal - this enables us to detect
3955 	 * and handle a silent filesystem corruption rather than crashing.
3956 	 */
3957 	if (args.agno >= args.mp->m_sb.sb_agcount)
3958 		return -EFSCORRUPTED;
3959 
3960 	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3961 	if (error)
3962 		return error;
3963 
3964 	*agbp = args.agbp;
3965 	return 0;
3966 }
3967 
3968 /*
3969  * Free an extent.
3970  * Just break up the extent address and hand off to xfs_free_ag_extent
3971  * after fixing up the freelist.
3972  */
3973 int
3974 __xfs_free_extent(
3975 	struct xfs_trans		*tp,
3976 	struct xfs_perag		*pag,
3977 	xfs_agblock_t			agbno,
3978 	xfs_extlen_t			len,
3979 	const struct xfs_owner_info	*oinfo,
3980 	enum xfs_ag_resv_type		type,
3981 	bool				skip_discard)
3982 {
3983 	struct xfs_mount		*mp = tp->t_mountp;
3984 	struct xfs_buf			*agbp;
3985 	struct xfs_agf			*agf;
3986 	int				error;
3987 	unsigned int			busy_flags = 0;
3988 
3989 	ASSERT(len != 0);
3990 	ASSERT(type != XFS_AG_RESV_AGFL);
3991 
3992 	if (XFS_TEST_ERROR(false, mp,
3993 			XFS_ERRTAG_FREE_EXTENT))
3994 		return -EIO;
3995 
3996 	error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3997 	if (error) {
3998 		if (xfs_metadata_is_sick(error))
3999 			xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
4000 		return error;
4001 	}
4002 
4003 	agf = agbp->b_addr;
4004 
4005 	if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
4006 		xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
4007 		error = -EFSCORRUPTED;
4008 		goto err_release;
4009 	}
4010 
4011 	/* validate the extent size is legal now we have the agf locked */
4012 	if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
4013 		xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
4014 		error = -EFSCORRUPTED;
4015 		goto err_release;
4016 	}
4017 
4018 	error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo,
4019 			type);
4020 	if (error)
4021 		goto err_release;
4022 
4023 	if (skip_discard)
4024 		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
4025 	xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
4026 	return 0;
4027 
4028 err_release:
4029 	xfs_trans_brelse(tp, agbp);
4030 	return error;
4031 }
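
/*
 * Illustrative sketch (editor addition): most callers reach
 * __xfs_free_extent() through a thin wrapper along these lines, with
 * @skip_discard fixed to false. The real wrapper lives in xfs_alloc.h and
 * may differ in detail from this sketch.
 */
static inline int
example_free_extent(
	struct xfs_trans		*tp,
	struct xfs_perag		*pag,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	return __xfs_free_extent(tp, pag, agbno, len, oinfo, type, false);
}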
4032 
4033 struct xfs_alloc_query_range_info {
4034 	xfs_alloc_query_range_fn	fn;
4035 	void				*priv;
4036 };
4037 
4038 /* Format btree record and pass to our callback. */
4039 STATIC int
4040 xfs_alloc_query_range_helper(
4041 	struct xfs_btree_cur		*cur,
4042 	const union xfs_btree_rec	*rec,
4043 	void				*priv)
4044 {
4045 	struct xfs_alloc_query_range_info	*query = priv;
4046 	struct xfs_alloc_rec_incore		irec;
4047 	xfs_failaddr_t				fa;
4048 
4049 	xfs_alloc_btrec_to_irec(rec, &irec);
4050 	fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
4051 	if (fa)
4052 		return xfs_alloc_complain_bad_rec(cur, fa, &irec);
4053 
4054 	return query->fn(cur, &irec, query->priv);
4055 }
4056 
4057 /* Find all free space within a given range of blocks. */
4058 int
4059 xfs_alloc_query_range(
4060 	struct xfs_btree_cur			*cur,
4061 	const struct xfs_alloc_rec_incore	*low_rec,
4062 	const struct xfs_alloc_rec_incore	*high_rec,
4063 	xfs_alloc_query_range_fn		fn,
4064 	void					*priv)
4065 {
4066 	union xfs_btree_irec			low_brec = { .a = *low_rec };
4067 	union xfs_btree_irec			high_brec = { .a = *high_rec };
4068 	struct xfs_alloc_query_range_info	query = { .priv = priv, .fn = fn };
4069 
4070 	ASSERT(xfs_btree_is_bno(cur->bc_ops));
4071 	return xfs_btree_query_range(cur, &low_brec, &high_brec,
4072 			xfs_alloc_query_range_helper, &query);
4073 }
4074 
4075 /* Find all free space records. */
4076 int
4077 xfs_alloc_query_all(
4078 	struct xfs_btree_cur			*cur,
4079 	xfs_alloc_query_range_fn		fn,
4080 	void					*priv)
4081 {
4082 	struct xfs_alloc_query_range_info	query;
4083 
4084 	ASSERT(xfs_btree_is_bno(cur->bc_ops));
4085 	query.priv = priv;
4086 	query.fn = fn;
4087 	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
4088 }
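
/*
 * Illustrative sketch (editor addition): a query callback for
 * xfs_alloc_query_range()/xfs_alloc_query_all() that totals the free space
 * seen. The structure and helper names are hypothetical; the callback
 * signature matches how xfs_alloc_query_range_helper() invokes query->fn
 * above.
 */
struct example_freesp_total {
	xfs_extlen_t				blocks;
};

static int
example_count_free_fn(
	struct xfs_btree_cur			*cur,
	const struct xfs_alloc_rec_incore	*rec,
	void					*priv)
{
	struct example_freesp_total	*t = priv;

	t->blocks += rec->ar_blockcount;
	return 0;
}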
4089 
4090 /*
4091  * Scan part of the keyspace of the free space and tell us if the area has no
4092  * records, is fully mapped by records, or is partially filled.
4093  */
4094 int
4095 xfs_alloc_has_records(
4096 	struct xfs_btree_cur	*cur,
4097 	xfs_agblock_t		bno,
4098 	xfs_extlen_t		len,
4099 	enum xbtree_recpacking	*outcome)
4100 {
4101 	union xfs_btree_irec	low;
4102 	union xfs_btree_irec	high;
4103 
4104 	memset(&low, 0, sizeof(low));
4105 	low.a.ar_startblock = bno;
4106 	memset(&high, 0xFF, sizeof(high));
4107 	high.a.ar_startblock = bno + len - 1;
4108 
4109 	return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
4110 }
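
/*
 * Illustrative usage (editor addition): check whether an extent is entirely
 * free before reusing it. Per the comment above, XBTREE_RECPACKING_FULL means
 * the whole range is covered by free space records. The helper name is
 * hypothetical.
 */
static inline int
example_extent_is_free(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*is_free)
{
	enum xbtree_recpacking	outcome;
	int			error;

	error = xfs_alloc_has_records(cur, bno, len, &outcome);
	if (error)
		return error;
	*is_free = (outcome == XBTREE_RECPACKING_FULL);
	return 0;
}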
4111 
4112 /*
4113  * Walk all the blocks in the AGFL.  The @walk_fn can return any negative
4114  * error code or XFS_ITER_*.
4115  */
4116 int
4117 xfs_agfl_walk(
4118 	struct xfs_mount	*mp,
4119 	struct xfs_agf		*agf,
4120 	struct xfs_buf		*agflbp,
4121 	xfs_agfl_walk_fn	walk_fn,
4122 	void			*priv)
4123 {
4124 	__be32			*agfl_bno;
4125 	unsigned int		i;
4126 	int			error;
4127 
4128 	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
4129 	i = be32_to_cpu(agf->agf_flfirst);
4130 
4131 	/* Nothing to walk in an empty AGFL. */
4132 	if (agf->agf_flcount == cpu_to_be32(0))
4133 		return 0;
4134 
4135 	/* Otherwise, walk from first to last, wrapping as needed. */
4136 	for (;;) {
4137 		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
4138 		if (error)
4139 			return error;
4140 		if (i == be32_to_cpu(agf->agf_fllast))
4141 			break;
4142 		if (++i == xfs_agfl_size(mp))
4143 			i = 0;
4144 	}
4145 
4146 	return 0;
4147 }
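
/*
 * Illustrative sketch (editor addition): an xfs_agfl_walk() callback that
 * searches the AGFL for a particular block and stops the walk early with a
 * negative error code, as the comment above permits. The structure and
 * helper names are hypothetical.
 */
struct example_agfl_find {
	xfs_agblock_t	target;
	bool		found;
};

static int
example_agfl_find_fn(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	struct example_agfl_find	*f = priv;

	if (bno != f->target)
		return 0;
	f->found = true;
	return -ECANCELED;	/* any negative error stops the walk */
}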
4148 
4149 int __init
4150 xfs_extfree_intent_init_cache(void)
4151 {
4152 	xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
4153 			sizeof(struct xfs_extent_free_item),
4154 			0, 0, NULL);
4155 
4156 	return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
4157 }
4158 
4159 void
4160 xfs_extfree_intent_destroy_cache(void)
4161 {
4162 	kmem_cache_destroy(xfs_extfree_item_cache);
4163 	xfs_extfree_item_cache = NULL;
4164 }
4165