// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"
#include "xfs_health.h"
#include "xfs_extfree_item.h"

struct kmem_cache	*xfs_extfree_item_cache;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots
 * at the beginning of the block for a proper header with the location
 * information and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_has_crc(mp))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
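
/*
 * A worked example under an assumed geometry (not a requirement): on a V5
 * (CRC) filesystem with 512-byte sectors, the struct xfs_agfl header takes
 * 36 bytes and each slot is a 4-byte xfs_agblock_t, so:
 *
 *	(512 - 36) / 4 = 119 free list slots per AGFL
 *
 * On a pre-CRC (V4) filesystem the whole sector holds slots: 512 / 4 = 128.
 */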

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_has_rmapbt(mp))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_has_finobt(mp))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_has_reflink(mp))
		return xfs_refc_block(mp) + 1;
	if (xfs_has_rmapbt(mp))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_has_finobt(mp))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

/*
 * The number of blocks per AG that we withhold from xfs_dec_fdblocks to
 * guarantee that we can refill the AGFL prior to allocating space in a
 * nearly full AG.  Although the space described by the free space btrees,
 * the blocks used by the freesp btrees themselves, and the blocks owned by
 * the AGFL are counted in the ondisk fdblocks, it's a mistake to let the
 * ondisk free space in the AG drop so low that the free space btrees cannot
 * refill an empty AGFL up to the minimum level.  Rather than grind through
 * empty AGs until the fs goes down, we subtract this many AG blocks from the
 * incore fdblocks to ensure user allocation does not overcommit the space
 * the filesystem needs for the AGFLs.  The rmap btree uses a per-AG
 * reservation to withhold space from xfs_dec_fdblocks, so we do not account
 * for that here.
 */
#define XFS_ALLOCBT_AGFL_RESERVE	4

/*
 * Compute the number of blocks that we set aside to guarantee the ability to
 * refill the AGFL and handle a full bmap btree split.
 *
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking
 * of AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file
 * data bmap btree blocks.  However, these restrictions may result in no
 * actual space allocated for a delayed extent, for example, a data block in
 * a certain AG is allocated but there is no additional block for the
 * additional bmap btree block due to a split of the bmap btree of the file.
 * The result of this may lead to an infinite loop when the file gets flushed
 * to disk and all delayed extents need to be actually allocated.  To get
 * around this, we explicitly set aside a few blocks which will not be
 * reserved in delayed allocation.
 *
 * For each AG, we need to reserve enough blocks to replenish a totally empty
 * AGFL and 4 more to handle a potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
}
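
/*
 * For example (hypothetical geometry): a filesystem with 4 AGs sets aside
 * 4 * (XFS_ALLOCBT_AGFL_RESERVE + 4) = 4 * 8 = 32 blocks, which are
 * subtracted from the incore free block count visible to user allocations.
 */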

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG.  However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata.  These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry.  The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOCBT_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_has_finobt(mp))
		blocks++;		/* finobt root block */
	if (xfs_has_rmapbt(mp))
		blocks++;		/* rmap root block */
	if (xfs_has_reflink(mp))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
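
/*
 * A worked example under an assumed geometry: with 4096-byte blocks and
 * 512-byte sectors, the four sector-sized AG headers fit in a single
 * filesystem block, so with finobt, rmapbt and reflink all enabled:
 *
 *	blocks = 1 (headers) + 4 (AGFL reserve) + 3 (AGF/AGI roots)
 *		 + 1 (finobt) + 1 (rmapbt) + 1 (refcountbt) = 11
 *
 * and the maximum usable allocation is sb_agblocks - 11.
 */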


static int
xfs_alloc_lookup(
	struct xfs_btree_cur	*cur,
	xfs_lookup_t		dir,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	int			*stat)
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, dir, stat);
	if (*stat == 1)
		cur->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
	else
		cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
	return error;
}

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
static inline int			/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
}

static inline bool
xfs_alloc_cur_active(
	struct xfs_btree_cur	*cur)
{
	return cur && (cur->bc_flags & XFS_BTREE_ALLOCBT_ACTIVE);
}
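
/*
 * A minimal usage sketch of the lookup helpers (illustrative, not taken
 * from a caller in this file): position a by-size cursor at the smallest
 * record that could satisfy a request of "want" blocks, then read it back:
 *
 *	error = xfs_alloc_lookup_ge(cnt_cur, 0, want, &i);
 *	if (!error && i)
 *		error = xfs_alloc_get_rec(cnt_cur, &bno, &len, &i);
 *
 * A successful lookup (i == 1) also marks the cursor active via
 * XFS_BTREE_ALLOCBT_ACTIVE, which the search loops below test with
 * xfs_alloc_cur_active().
 */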

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/* Convert the ondisk btree record to its incore representation. */
void
xfs_alloc_btrec_to_irec(
	const union xfs_btree_rec	*rec,
	struct xfs_alloc_rec_incore	*irec)
{
	irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
	irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
}

/* Simple checks for free space records. */
xfs_failaddr_t
xfs_alloc_check_irec(
	struct xfs_perag			*pag,
	const struct xfs_alloc_rec_incore	*irec)
{
	if (irec->ar_blockcount == 0)
		return __this_address;

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount))
		return __this_address;

	return NULL;
}

static inline int
xfs_alloc_complain_bad_rec(
	struct xfs_btree_cur		*cur,
	xfs_failaddr_t			fa,
	const struct xfs_alloc_rec_incore *irec)
{
	struct xfs_mount		*mp = cur->bc_mp;

	xfs_warn(mp,
		"%sbt record corruption in AG %d detected at %pS!",
		cur->bc_ops->name, cur->bc_ag.pag->pag_agno, fa);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", irec->ar_startblock,
		irec->ar_blockcount);
	xfs_btree_mark_sick(cur);
	return -EFSCORRUPTED;
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_alloc_rec_incore irec;
	union xfs_btree_rec	*rec;
	xfs_failaddr_t		fa;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;

	xfs_alloc_btrec_to_irec(rec, &irec);
	fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
	if (fa)
		return xfs_alloc_complain_bad_rec(cur, fa, &irec);

	*bno = irec.ar_startblock;
	*len = irec.ar_blockcount;
	return 0;
}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
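
/*
 * A worked example (hypothetical numbers): a busy-trimmed free extent of
 * [bno = 7, len = 20] with args->alignment = 4 rounds up to
 * aligned_bno = 8, losing diff = 1 block, so the caller sees
 * [*resbno = 8, *reslen = 19].  If the alignment round-up consumed the
 * whole extent (diff >= len), *reslen would be 0 and the extent would be
 * unusable for this request.
 */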

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1 = 0;	/* length with newbno1 */
	xfs_extlen_t	newlen2 = 0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = datatype & XFS_ALLOC_USERDATA;

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block.  The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
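
/*
 * Two worked examples (hypothetical numbers), with wantbno = 100,
 * wantlen = 8 and alignment = 1:
 *
 *  - free extent [120, 50]: freebno >= wantbno, so we allocate from the
 *    start of the free extent; *newbnop = 120 and the returned diff is
 *    XFS_ABSDIFF(120, 100) = 20.
 *
 *  - free extent [40, 100]: the free space straddles the hint and freeend
 *    (140) covers wantend (108), so *newbnop = wantbno = 100 and the diff
 *    is 0, i.e. perfect locality.
 */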

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
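
/*
 * A worked example (hypothetical values): with prod = 4, mod = 1 and an
 * incoming len of 14, k = 14 % 4 = 2.  Since k > mod, the length is
 * trimmed to 14 - (2 - 1) = 13, and 13 % 4 == 1 == mod as required.
 * Had len been 12 (k = 0 < mod), it would shrink by a full prod minus
 * the shortfall: 12 - 4 + (1 - 0) = 9, and 9 % 4 == 1 == mod.
 */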

/*
 * Determine if the cursor points to the block that contains the right-most
 * block of records in the by-count btree.  This block contains the largest
 * contiguous free extent in the AG, so if we modify a record in this block
 * we need to call xfs_alloc_fixup_longest() once the modifications are done
 * to ensure the agf->agf_longest field is kept up to date with the longest
 * free extent tracked by the by-count btree.
 */
static bool
xfs_alloc_cursor_at_lastrec(
	struct xfs_btree_cur	*cnt_cur)
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cnt_cur, 0, &bp);

	xfs_btree_get_sibling(cnt_cur, block, &ptr, XFS_BB_RIGHTSIB);
	return xfs_btree_ptr_is_null(cnt_cur, &ptr);
}

/*
 * Find the rightmost record of the cntbt, and return the longest free space
 * recorded in it.  Simply set both the block number and the length to their
 * maximum values before searching.
 */
static int
xfs_cntbt_longest(
	struct xfs_btree_cur	*cnt_cur,
	xfs_extlen_t		*longest)
{
	struct xfs_alloc_rec_incore irec;
	union xfs_btree_rec	*rec;
	int			stat = 0;
	int			error;

	memset(&cnt_cur->bc_rec, 0xFF, sizeof(cnt_cur->bc_rec));
	error = xfs_btree_lookup(cnt_cur, XFS_LOOKUP_LE, &stat);
	if (error)
		return error;
	if (!stat) {
		/* totally empty tree */
		*longest = 0;
		return 0;
	}

	error = xfs_btree_get_rec(cnt_cur, &rec, &stat);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cnt_cur->bc_mp, !stat)) {
		xfs_btree_mark_sick(cnt_cur);
		return -EFSCORRUPTED;
	}

	xfs_alloc_btrec_to_irec(rec, &irec);
	*longest = irec.ar_blockcount;
	return 0;
}

/*
 * Update the longest contiguous free extent in the AG from the by-count
 * cursor that is passed to us.  This should be done at the end of any
 * allocation or freeing operation that touches the longest extent in the
 * btree.
 *
 * Needing to update the longest extent can be determined by calling
 * xfs_alloc_cursor_at_lastrec() after the cursor is positioned for record
 * modification but before the modification begins.
 */
static int
xfs_alloc_fixup_longest(
	struct xfs_btree_cur	*cnt_cur)
{
	struct xfs_perag	*pag = cnt_cur->bc_ag.pag;
	struct xfs_buf		*bp = cnt_cur->bc_ag.agbp;
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		longest = 0;
	int			error;

	/* Lookup last rec in order to update AGF. */
	error = xfs_cntbt_longest(cnt_cur, &longest);
	if (error)
		return error;

	pag->pagf_longest = longest;
	agf->agf_longest = cpu_to_be32(pag->pagf_longest);
	xfs_alloc_log_agf(cnt_cur->bc_tp, bp, XFS_AGF_LONGEST);

	return 0;
}

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	struct xfs_btree_cur *cnt_cur,	/* cursor for by-size btree */
	struct xfs_btree_cur *bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1 = 0;	/* first new free length */
	xfs_extlen_t	nflen2 = 0;	/* second new free length */
	struct xfs_mount *mp;
	bool		fixup_longest = false;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen)) {
			xfs_btree_mark_sick(cnt_cur);
			return -EFSCORRUPTED;
		}
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cnt_cur);
			return -EFSCORRUPTED;
		}
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen)) {
			xfs_btree_mark_sick(bno_cur);
			return -EFSCORRUPTED;
		}
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(bno_cur);
			return -EFSCORRUPTED;
		}
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);

		if (XFS_IS_CORRUPT(mp,
				   bnoblock->bb_numrecs !=
				   cntblock->bb_numrecs)) {
			xfs_btree_mark_sick(bno_cur);
			return -EFSCORRUPTED;
		}
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}

	if (xfs_alloc_cursor_at_lastrec(cnt_cur))
		fixup_longest = true;

	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		xfs_btree_mark_sick(cnt_cur);
		return -EFSCORRUPTED;
	}
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			xfs_btree_mark_sick(cnt_cur);
			return -EFSCORRUPTED;
		}
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cnt_cur);
			return -EFSCORRUPTED;
		}
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			xfs_btree_mark_sick(cnt_cur);
			return -EFSCORRUPTED;
		}
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cnt_cur);
			return -EFSCORRUPTED;
		}
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(bno_cur);
			return -EFSCORRUPTED;
		}
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0)) {
			xfs_btree_mark_sick(bno_cur);
			return -EFSCORRUPTED;
		}
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(bno_cur);
			return -EFSCORRUPTED;
		}
	}

	if (fixup_longest)
		return xfs_alloc_fixup_longest(cnt_cur);

	return 0;
}
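
/*
 * To illustrate the four cases above with a free extent [fbno = 10,
 * flen = 8] (hypothetical numbers):
 *
 *	rbno = 10, rlen = 8: exact match, both records are deleted;
 *	rbno = 10, rlen = 3: remainder [13, 5] survives at the tail;
 *	rbno = 15, rlen = 3: remainder [10, 5] survives at the head;
 *	rbno = 12, rlen = 3: the middle is carved out, leaving [10, 2]
 *			     and [15, 3] as two new free records.
 */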

/*
 * We do not verify the AGFL contents against AGF-based index counters here,
 * even though we may have access to the perag that contains shadow copies.
 * We don't know if the AGF based counters have been checked, and if they
 * have they still may be inconsistent because they haven't yet been reset
 * on the first allocation after the AGF has been read in.
 *
 * This means we can only check that all agfl entries contain valid or null
 * values because we can't reliably determine the active range to exclude
 * NULLAGBNO as a valid value.
 *
 * However, we can't even do that for v4 format filesystems because there are
 * old versions of mkfs out there that do not initialise the AGFL to known,
 * verifiable values.  Hence we can't tell the difference between an AGFL
 * block allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
 *
 * As a result, we can only fully validate AGFL block numbers when we pull
 * them from the freelist in xfs_alloc_get_freelist().
 */
static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	__be32		*agfl_bno = xfs_buf_to_agfl_bno(bp);
	int		i;

	if (!xfs_has_crc(mp))
		return NULL;

	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
		return __this_address;
	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of
	 * the AGFL is what the AGF says is active. We can't get to the AGF,
	 * so we can't verify just those entries are valid.
	 */
	if (!xfs_has_crc(mp))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_buf_log_item *bip = bp->b_log_item;
	xfs_failaddr_t	fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_has_crc(mp))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};

/*
 * Read in the allocation group free block array.
 */
int
xfs_alloc_read_agfl(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (xfs_metadata_is_sick(error))
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = agbp->b_addr;

	agbp->b_pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length))) {
		xfs_buf_mark_corrupt(agbp);
		xfs_ag_mark_sick(agbp->b_pag, XFS_SICK_AG_AGF);
		return -EFSCORRUPTED;
	}

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}

/*
 * Block allocation algorithm and data structures.
 */
struct xfs_alloc_cur {
	struct xfs_btree_cur	*cnt;	/* btree cursors */
	struct xfs_btree_cur	*bnolt;
	struct xfs_btree_cur	*bnogt;
	xfs_extlen_t		cur_len;/* current search length */
	xfs_agblock_t		rec_bno;/* extent startblock */
	xfs_extlen_t		rec_len;/* extent length */
	xfs_agblock_t		bno;	/* alloc bno */
	xfs_extlen_t		len;	/* alloc len */
	xfs_extlen_t		diff;	/* diff from search bno */
	unsigned int		busy_gen;/* busy state */
	bool			busy;
};

/*
 * Set up cursors, etc. in the extent allocation cursor. This function can be
 * called multiple times to reset an initialized structure without having to
 * reallocate cursors.
 */
static int
xfs_alloc_cur_setup(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	int			error;
	int			i;

	acur->cur_len = args->maxlen;
	acur->rec_bno = 0;
	acur->rec_len = 0;
	acur->bno = 0;
	acur->len = 0;
	acur->diff = -1;
	acur->busy = false;
	acur->busy_gen = 0;

	/*
	 * Perform an initial cntbt lookup to check for availability of maxlen
	 * extents. If this fails, we'll return -ENOSPC to signal the caller to
	 * attempt a small allocation.
	 */
	if (!acur->cnt)
		acur->cnt = xfs_cntbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag);
	error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
	if (error)
		return error;

	/*
	 * Allocate the bnobt left and right search cursors.
	 */
	if (!acur->bnolt)
		acur->bnolt = xfs_bnobt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag);
	if (!acur->bnogt)
		acur->bnogt = xfs_bnobt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag);
	return i == 1 ? 0 : -ENOSPC;
}

static void
xfs_alloc_cur_close(
	struct xfs_alloc_cur	*acur,
	bool			error)
{
	int			cur_error = XFS_BTREE_NOERROR;

	if (error)
		cur_error = XFS_BTREE_ERROR;

	if (acur->cnt)
		xfs_btree_del_cursor(acur->cnt, cur_error);
	if (acur->bnolt)
		xfs_btree_del_cursor(acur->bnolt, cur_error);
	if (acur->bnogt)
		xfs_btree_del_cursor(acur->bnogt, cur_error);
	acur->cnt = acur->bnolt = acur->bnogt = NULL;
}

/*
 * Check an extent for allocation and track the best available candidate in
 * the allocation structure. The cursor is deactivated if it has entered an
 * out of range state based on allocation arguments. Optionally return the
 * extent geometry and allocation status if requested by the caller.
 */
static int
xfs_alloc_cur_check(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	int			*new)
{
	int			error, i;
	xfs_agblock_t		bno, bnoa, bnew;
	xfs_extlen_t		len, lena, diff = -1;
	bool			busy;
	unsigned		busy_gen = 0;
	bool			deactivate = false;
	bool			isbnobt = xfs_btree_is_bno(cur->bc_ops);

	*new = 0;

	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
		xfs_btree_mark_sick(cur);
		return -EFSCORRUPTED;
	}

	/*
	 * Check minlen and deactivate a cntbt cursor if out of acceptable size
	 * range (i.e., walking backwards looking for a minlen extent).
	 */
	if (len < args->minlen) {
		deactivate = !isbnobt;
		goto out;
	}

	busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
					 &busy_gen);
	acur->busy |= busy;
	if (busy)
		acur->busy_gen = busy_gen;
	/* deactivate a bnobt cursor outside of locality range */
	if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
		deactivate = isbnobt;
		goto out;
	}
	if (lena < args->minlen)
		goto out;

	args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
	xfs_alloc_fix_len(args);
	ASSERT(args->len >= args->minlen);
	if (args->len < acur->len)
		goto out;

	/*
	 * We have an aligned record that satisfies minlen and beats or matches
	 * the candidate extent size. Compare locality for near allocation
	 * mode.
	 */
	diff = xfs_alloc_compute_diff(args->agbno, args->len,
				      args->alignment, args->datatype,
				      bnoa, lena, &bnew);
	if (bnew == NULLAGBLOCK)
		goto out;

	/*
	 * Deactivate a bnobt cursor with worse locality than the current best.
	 */
	if (diff > acur->diff) {
		deactivate = isbnobt;
		goto out;
	}

	ASSERT(args->len > acur->len ||
	       (args->len == acur->len && diff <= acur->diff));
	acur->rec_bno = bno;
	acur->rec_len = len;
	acur->bno = bnew;
	acur->len = args->len;
	acur->diff = diff;
	*new = 1;

	/*
	 * We're done if we found a perfect allocation. This only deactivates
	 * the current cursor, but this is just an optimization to terminate a
	 * cntbt search that otherwise runs to the edge of the tree.
	 */
	if (acur->diff == 0 && acur->len == args->maxlen)
		deactivate = true;
out:
	if (deactivate)
		cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
	trace_xfs_alloc_cur_check(cur, bno, len, diff, *new);
	return 0;
}

/*
 * Complete an allocation of a candidate extent. Remove the extent from both
 * trees and update the args structure.
 */
STATIC int
xfs_alloc_cur_finish(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	int			error;

	ASSERT(acur->cnt && acur->bnolt);
	ASSERT(acur->bno >= acur->rec_bno);
	ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
	ASSERT(xfs_verify_agbext(args->pag, acur->rec_bno, acur->rec_len));

	error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
				      acur->rec_len, acur->bno, acur->len, 0);
	if (error)
		return error;

	args->agbno = acur->bno;
	args->len = acur->len;
	args->wasfromfl = 0;

	trace_xfs_alloc_cur(args);
	return 0;
}

/*
 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
 * bno optimized lookup to search for extents with ideal size and locality.
 */
STATIC int
xfs_alloc_cntbt_iter(
	struct xfs_alloc_arg		*args,
	struct xfs_alloc_cur		*acur)
{
	struct xfs_btree_cur	*cur = acur->cnt;
	xfs_agblock_t		bno;
	xfs_extlen_t		len, cur_len;
	int			error;
	int			i;

	if (!xfs_alloc_cur_active(cur))
		return 0;

	/* locality optimized lookup */
	cur_len = acur->cur_len;
	error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
	if (error)
		return error;
	if (i == 0)
		return 0;
	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;

	/* check the current record and update search length from it */
	error = xfs_alloc_cur_check(args, acur, cur, &i);
	if (error)
		return error;
	ASSERT(len >= acur->cur_len);
	acur->cur_len = len;

	/*
	 * We looked up the first record >= [agbno, len] above. The agbno is a
	 * secondary key and so the current record may lie just before or after
	 * agbno. If it is past agbno, check the previous record too so long as
	 * the length matches as it may be closer. Don't check a smaller record
	 * because that could deactivate our cursor.
	 */
	if (bno > args->agbno) {
		error = xfs_btree_decrement(cur, 0, &i);
		if (!error && i) {
			error = xfs_alloc_get_rec(cur, &bno, &len, &i);
			if (!error && i && len == acur->cur_len)
				error = xfs_alloc_cur_check(args, acur, cur,
							    &i);
		}
		if (error)
			return error;
	}

	/*
	 * Increment the search key by one if we haven't yet found an
	 * allocation candidate, or if the extent we found was already larger
	 * than the doubled key. Otherwise, double the search key to optimize
	 * the search. Efficiency is more important here than absolute best
	 * locality.
	 */
	cur_len <<= 1;
	if (!acur->len || acur->cur_len >= cur_len)
		acur->cur_len++;
	else
		acur->cur_len = cur_len;

	return error;
}
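
/*
 * For example (a hypothetical progression): starting with cur_len = 8 and
 * no candidate found yet, successive calls step the key 8 -> 9 -> 10; once
 * a candidate exists, the key instead doubles 10 -> 20 -> 40, trading some
 * locality precision for far fewer cntbt lookups.
 */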

/*
 * Deal with the case where only small freespaces remain. Either return the
 * contents of the last freespace record, or allocate space from the freelist
 * if there is nothing in the tree.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_small(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
	xfs_agblock_t		*fbnop,	/* result block number */
	xfs_extlen_t		*flenp,	/* result length */
	int			*stat)	/* status: 0-freelist, 1-normal/none */
{
	struct xfs_agf		*agf = args->agbp->b_addr;
	int			error = 0;
	xfs_agblock_t		fbno = NULLAGBLOCK;
	xfs_extlen_t		flen = 0;
	int			i = 0;

	/*
	 * If a cntbt cursor is provided, try to allocate the largest record in
	 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
	 * allocation. Make sure to respect minleft even when pulling from the
	 * freelist.
	 */
	if (ccur)
		error = xfs_btree_decrement(ccur, 0, &i);
	if (error)
		goto error;
	if (i) {
		error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
			xfs_btree_mark_sick(ccur);
			error = -EFSCORRUPTED;
			goto error;
		}
		goto out;
	}

	if (args->minlen != 1 || args->alignment != 1 ||
	    args->resv == XFS_AG_RESV_AGFL ||
	    be32_to_cpu(agf->agf_flcount) <= args->minleft)
		goto out;

	error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
				       &fbno, 0);
	if (error)
		goto error;
	if (fbno == NULLAGBLOCK)
		goto out;

	xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
			      (args->datatype & XFS_ALLOC_NOBUSY));

	if (args->datatype & XFS_ALLOC_USERDATA) {
		struct xfs_buf	*bp;

		error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
				args->mp->m_bsize, 0, &bp);
		if (error)
			goto error;
		xfs_trans_binval(args->tp, bp);
	}
	*fbnop = args->agbno = fbno;
	*flenp = args->len = 1;
	if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
		xfs_btree_mark_sick(ccur);
		error = -EFSCORRUPTED;
		goto error;
	}
	args->wasfromfl = 1;
	trace_xfs_alloc_small_freelist(args);

	/*
	 * If we're feeding an AGFL block to something that doesn't live in the
	 * free space, we need to clear out the OWN_AG rmap.
	 */
	error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
			      &XFS_RMAP_OINFO_AG);
	if (error)
		goto error;

	*stat = 0;
	return 0;

out:
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error:
	trace_xfs_alloc_small_error(args);
	return error;
}

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	struct xfs_btree_cur *bno_cur;	/* by block-number btree cursor */
	struct xfs_btree_cur *cnt_cur;	/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy extent */
	xfs_extlen_t	tlen;	/* length of busy extent */
	xfs_agblock_t	tend;	/* end block of busy extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
		xfs_btree_mark_sick(bno_cur);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the minimum request.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag);
	ASSERT(xfs_verify_agbext(args->pag, args->agbno, args->len));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
				      args->agbno, args->len,
				      XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
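
/*
 * A worked example of the exact allocator (hypothetical numbers): with
 * args->agbno = 50, minlen = 4 and maxlen = 10, the LE lookup might find
 * the free record [fbno = 48, flen = 20] covering blocks 48..67.  With no
 * busy trimming, tend = 68 and args->len = min(68, 50 + 10) - 50 = 10, so
 * blocks [50, 10] are allocated and xfs_alloc_fixup_trees() carves the
 * remainders [48, 2] and [60, 8] back into both btrees.
 */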
1426
1427 /*
1428 * Search a given number of btree records in a given direction. Check each
1429 * record against the good extent we've already found.
1430 */
1431 STATIC int
xfs_alloc_walk_iter(struct xfs_alloc_arg * args,struct xfs_alloc_cur * acur,struct xfs_btree_cur * cur,bool increment,bool find_one,int count,int * stat)1432 xfs_alloc_walk_iter(
1433 struct xfs_alloc_arg *args,
1434 struct xfs_alloc_cur *acur,
1435 struct xfs_btree_cur *cur,
1436 bool increment,
1437 bool find_one, /* quit on first candidate */
1438 int count, /* rec count (-1 for infinite) */
1439 int *stat)
1440 {
1441 int error;
1442 int i;
1443
1444 *stat = 0;
1445
1446 /*
1447 * Search so long as the cursor is active or we find a better extent.
1448 * The cursor is deactivated if it extends beyond the range of the
1449 * current allocation candidate.
1450 */
1451 while (xfs_alloc_cur_active(cur) && count) {
1452 error = xfs_alloc_cur_check(args, acur, cur, &i);
1453 if (error)
1454 return error;
1455 if (i == 1) {
1456 *stat = 1;
1457 if (find_one)
1458 break;
1459 }
1460 if (!xfs_alloc_cur_active(cur))
1461 break;
1462
1463 if (increment)
1464 error = xfs_btree_increment(cur, 0, &i);
1465 else
1466 error = xfs_btree_decrement(cur, 0, &i);
1467 if (error)
1468 return error;
1469 if (i == 0)
1470 cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
1471
1472 if (count > 0)
1473 count--;
1474 }
1475
1476 return 0;
1477 }
1478
1479 /*
1480 * Search the by-bno and by-size btrees in parallel in search of an extent with
1481 * ideal locality based on the NEAR mode ->agbno locality hint.
1482 */
1483 STATIC int
xfs_alloc_ag_vextent_locality(struct xfs_alloc_arg * args,struct xfs_alloc_cur * acur,int * stat)1484 xfs_alloc_ag_vextent_locality(
1485 struct xfs_alloc_arg *args,
1486 struct xfs_alloc_cur *acur,
1487 int *stat)
1488 {
1489 struct xfs_btree_cur *fbcur = NULL;
1490 int error;
1491 int i;
1492 bool fbinc;
1493
1494 ASSERT(acur->len == 0);
1495
1496 *stat = 0;
1497
1498 error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1499 if (error)
1500 return error;
1501 error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1502 if (error)
1503 return error;
1504 error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1505 if (error)
1506 return error;
1507
1508 /*
1509 * Search the bnobt and cntbt in parallel. Search the bnobt left and
1510 * right and lookup the closest extent to the locality hint for each
1511 * extent size key in the cntbt. The entire search terminates
1512 * immediately on a bnobt hit because that means we've found best case
1513 * locality. Otherwise the search continues until the cntbt cursor runs
1514 * off the end of the tree. If no allocation candidate is found at this
1515 * point, give up on locality, walk backwards from the end of the cntbt
1516 * and take the first available extent.
1517 *
1518 * The parallel tree searches balance each other out to provide fairly
1519 * consistent performance for various situations. The bnobt search can
1520 * have pathological behavior in the worst case scenario of larger
1521 * allocation requests and fragmented free space. On the other hand, the
1522 * bnobt is able to satisfy most smaller allocation requests much more
1523 * quickly than the cntbt. The cntbt search can sift through fragmented
1524 * free space and sets of free extents for larger allocation requests
1525 * more quickly than the bnobt. Since the locality hint is just a hint
1526 * and we don't want to scan the entire bnobt for perfect locality, the
1527 * cntbt search essentially bounds the bnobt search such that we can
1528 * find good enough locality at reasonable performance in most cases.
1529 */
1530 while (xfs_alloc_cur_active(acur->bnolt) ||
1531 xfs_alloc_cur_active(acur->bnogt) ||
1532 xfs_alloc_cur_active(acur->cnt)) {
1533
1534 trace_xfs_alloc_cur_lookup(args);
1535
1536 /*
1537 * Search the bnobt left and right. In the case of a hit, finish
1538 * the search in the opposite direction and we're done.
1539 */
1540 error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1541 true, 1, &i);
1542 if (error)
1543 return error;
1544 if (i == 1) {
1545 trace_xfs_alloc_cur_left(args);
1546 fbcur = acur->bnogt;
1547 fbinc = true;
1548 break;
1549 }
1550 error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1551 1, &i);
1552 if (error)
1553 return error;
1554 if (i == 1) {
1555 trace_xfs_alloc_cur_right(args);
1556 fbcur = acur->bnolt;
1557 fbinc = false;
1558 break;
1559 }
1560
1561 /*
1562 * Check the extent with best locality based on the current
1563 * extent size search key and keep track of the best candidate.
1564 */
1565 error = xfs_alloc_cntbt_iter(args, acur);
1566 if (error)
1567 return error;
1568 if (!xfs_alloc_cur_active(acur->cnt)) {
1569 trace_xfs_alloc_cur_lookup_done(args);
1570 break;
1571 }
1572 }
1573
1574 /*
1575 * If we failed to find anything due to busy extents, return empty
1576 * handed so the caller can flush and retry. If no busy extents were
1577 * found, walk backwards from the end of the cntbt as a last resort.
1578 */
1579 if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
1580 error = xfs_btree_decrement(acur->cnt, 0, &i);
1581 if (error)
1582 return error;
1583 if (i) {
1584 acur->cnt->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
1585 fbcur = acur->cnt;
1586 fbinc = false;
1587 }
1588 }
1589
1590 /*
1591 * Search in the opposite direction for a better entry in the case of
1592 * a bnobt hit or walk backwards from the end of the cntbt.
1593 */
1594 if (fbcur) {
1595 error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1596 &i);
1597 if (error)
1598 return error;
1599 }
1600
1601 if (acur->len)
1602 *stat = 1;
1603
1604 return 0;
1605 }
1606
1607 /* Check the last block of the cnt btree for allocations. */
1608 static int
xfs_alloc_ag_vextent_lastblock(struct xfs_alloc_arg * args,struct xfs_alloc_cur * acur,xfs_agblock_t * bno,xfs_extlen_t * len,bool * allocated)1609 xfs_alloc_ag_vextent_lastblock(
1610 struct xfs_alloc_arg *args,
1611 struct xfs_alloc_cur *acur,
1612 xfs_agblock_t *bno,
1613 xfs_extlen_t *len,
1614 bool *allocated)
1615 {
1616 int error;
1617 int i;
1618
1619 #ifdef DEBUG
1620 /* Randomly don't execute the first algorithm. */
1621 if (get_random_u32_below(2))
1622 return 0;
1623 #endif
1624
1625 /*
1626 * Start from the entry that lookup found, sequence through all larger
1627 * free blocks. If we're actually pointing at a record smaller than
1628 * maxlen, go to the start of this block, and skip all those smaller
1629 * than minlen.
1630 */
1631 if (*len || args->alignment > 1) {
1632 acur->cnt->bc_levels[0].ptr = 1;
1633 do {
1634 error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
1635 if (error)
1636 return error;
1637 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1638 xfs_btree_mark_sick(acur->cnt);
1639 return -EFSCORRUPTED;
1640 }
1641 if (*len >= args->minlen)
1642 break;
1643 error = xfs_btree_increment(acur->cnt, 0, &i);
1644 if (error)
1645 return error;
1646 } while (i);
1647 ASSERT(*len >= args->minlen);
1648 if (!i)
1649 return 0;
1650 }
1651
1652 error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1653 if (error)
1654 return error;
1655
1656 /*
1657 * It didn't work. We COULD be in a case where there's a good record
1658 * somewhere, so try again.
1659 */
1660 if (acur->len == 0)
1661 return 0;
1662
1663 trace_xfs_alloc_near_first(args);
1664 *allocated = true;
1665 return 0;
1666 }
1667
1668 /*
1669 * Allocate a variable extent near bno in the allocation group agno.
1670 * Extent's length (returned in len) will be between minlen and maxlen,
1671 * and of the form k * prod + mod unless there's nothing that large.
1672 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1673 */
1674 STATIC int
xfs_alloc_ag_vextent_near(struct xfs_alloc_arg * args,uint32_t alloc_flags)1675 xfs_alloc_ag_vextent_near(
1676 struct xfs_alloc_arg *args,
1677 uint32_t alloc_flags)
1678 {
1679 struct xfs_alloc_cur acur = {};
1680 int error; /* error code */
1681 int i; /* result code, temporary */
1682 xfs_agblock_t bno;
1683 xfs_extlen_t len;
1684
1685 /* handle uninitialized agbno range so caller doesn't have to */
1686 if (!args->min_agbno && !args->max_agbno)
1687 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1688 ASSERT(args->min_agbno <= args->max_agbno);
1689
1690 /* clamp agbno to the range if it's outside */
1691 if (args->agbno < args->min_agbno)
1692 args->agbno = args->min_agbno;
1693 if (args->agbno > args->max_agbno)
1694 args->agbno = args->max_agbno;
1695
1696 /* Retry once quickly if we find busy extents before blocking. */
1697 alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1698 restart:
1699 len = 0;
1700
1701 /*
1702 * Set up cursors and see if there are any free extents as big as
1703 * maxlen. If not, pick the last entry in the tree unless the tree is
1704 * empty.
1705 */
1706 error = xfs_alloc_cur_setup(args, &acur);
1707 if (error == -ENOSPC) {
1708 error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1709 &len, &i);
1710 if (error)
1711 goto out;
1712 if (i == 0 || len == 0) {
1713 trace_xfs_alloc_near_noentry(args);
1714 goto out;
1715 }
1716 ASSERT(i == 1);
1717 } else if (error) {
1718 goto out;
1719 }
1720
1721 /*
1722 * First algorithm.
1723 * If the requested extent is large wrt the freespaces available
1724 * in this a.g., then the cursor will be pointing to a btree entry
1725 * near the right edge of the tree. If it's in the last btree leaf
1726 * block, then we just examine all the entries in that block
1727 * that are big enough, and pick the best one.
1728 */
1729 if (xfs_btree_islastblock(acur.cnt, 0)) {
1730 bool allocated = false;
1731
1732 error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1733 &allocated);
1734 if (error)
1735 goto out;
1736 if (allocated)
1737 goto alloc_finish;
1738 }
1739
1740 /*
1741 * Second algorithm. Combined cntbt and bnobt search to find ideal
1742 * locality.
1743 */
1744 error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1745 if (error)
1746 goto out;
1747
1748 /*
1749 * If we couldn't get anything, give up.
1750 */
1751 if (!acur.len) {
1752 if (acur.busy) {
1753 /*
1754 * Our only valid extents must have been busy. Flush and
1755 * retry the allocation again. If we get an -EAGAIN
1756 * error, we're being told that a deadlock was avoided
1757 * and the current transaction needs committing before
1758 * the allocation can be retried.
1759 */
1760 trace_xfs_alloc_near_busy(args);
1761 error = xfs_extent_busy_flush(args->tp, args->pag,
1762 acur.busy_gen, alloc_flags);
1763 if (error)
1764 goto out;
1765
1766 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1767 goto restart;
1768 }
1769 trace_xfs_alloc_size_neither(args);
1770 args->agbno = NULLAGBLOCK;
1771 goto out;
1772 }
1773
1774 alloc_finish:
1775 /* fix up btrees on a successful allocation */
1776 error = xfs_alloc_cur_finish(args, &acur);
1777
1778 out:
1779 xfs_alloc_cur_close(&acur, error);
1780 return error;
1781 }
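
/*
 * For example, a caller wanting 4-16 blocks near agbno 1000, in multiples
 * of four, might fill out the request like this (an illustrative sketch;
 * the remaining xfs_alloc_arg fields are elided):
 *
 *	args.agbno  = 1000;
 *	args.minlen = 4;
 *	args.maxlen = 16;
 *	args.prod   = 4;
 *	args.mod    = 0;
 *
 * On success args.agbno/args.len describe the chosen extent, with args.len
 * a multiple of four unless nothing that large was free; on ENOSPC
 * args.agbno is NULLAGBLOCK.
 */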
1782
1783 /*
1784 * Allocate a variable extent anywhere in the allocation group agno.
1785 * Extent's length (returned in len) will be between minlen and maxlen,
1786 * and of the form k * prod + mod unless there's nothing that large.
1787 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1788 */
1789 static int
1790 xfs_alloc_ag_vextent_size(
1791 struct xfs_alloc_arg *args,
1792 uint32_t alloc_flags)
1793 {
1794 struct xfs_agf *agf = args->agbp->b_addr;
1795 struct xfs_btree_cur *bno_cur;
1796 struct xfs_btree_cur *cnt_cur;
1797 xfs_agblock_t fbno; /* start of found freespace */
1798 xfs_extlen_t flen; /* length of found freespace */
1799 xfs_agblock_t rbno; /* returned block number */
1800 xfs_extlen_t rlen; /* length of returned extent */
1801 bool busy;
1802 unsigned busy_gen;
1803 int error;
1804 int i;
1805
1806 /* Retry once quickly if we find busy extents before blocking. */
1807 alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;
1808 restart:
1809 /*
1810 * Allocate and initialize a cursor for the by-size btree.
1811 */
1812 cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp,
1813 args->pag);
1814 bno_cur = NULL;
1815
1816 /*
1817 * Look for an entry >= maxlen+alignment-1 blocks.
1818 */
1819 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1820 args->maxlen + args->alignment - 1, &i)))
1821 goto error0;
1822
1823 /*
1824 * If none then we have to settle for a smaller extent. In the case that
1825 * there are no large extents, this will return the last entry in the
1826 * tree unless the tree is empty. In the case that there are only busy
1827 * large extents, this will return the largest small extent unless there
1828 * are no smaller extents available.
1829 */
1830 if (!i) {
1831 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1832 &fbno, &flen, &i);
1833 if (error)
1834 goto error0;
1835 if (i == 0 || flen == 0) {
1836 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1837 trace_xfs_alloc_size_noentry(args);
1838 return 0;
1839 }
1840 ASSERT(i == 1);
1841 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1842 &rlen, &busy_gen);
1843 } else {
1844 /*
1845 * Search for a non-busy extent that is large enough.
1846 */
1847 for (;;) {
1848 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1849 if (error)
1850 goto error0;
1851 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1852 xfs_btree_mark_sick(cnt_cur);
1853 error = -EFSCORRUPTED;
1854 goto error0;
1855 }
1856
1857 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1858 &rbno, &rlen, &busy_gen);
1859
1860 if (rlen >= args->maxlen)
1861 break;
1862
1863 error = xfs_btree_increment(cnt_cur, 0, &i);
1864 if (error)
1865 goto error0;
1866 if (i)
1867 continue;
1868
1869 /*
1870 * Our only valid extents must have been busy. Flush and
1871 * retry the allocation again. If we get an -EAGAIN
1872 * error, we're being told that a deadlock was avoided
1873 * and the current transaction needs committing before
1874 * the allocation can be retried.
1875 */
1876 trace_xfs_alloc_size_busy(args);
1877 error = xfs_extent_busy_flush(args->tp, args->pag,
1878 busy_gen, alloc_flags);
1879 if (error)
1880 goto error0;
1881
1882 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1883 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1884 goto restart;
1885 }
1886 }
1887
1888 /*
1889 * In the first case above, we got the last entry in the
1890 * by-size btree. Now we check to see if the space hits maxlen
1891 * once aligned; if not, we search left for something better.
1892 * This can't happen in the second case above.
1893 */
1894 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1895 if (XFS_IS_CORRUPT(args->mp,
1896 rlen != 0 &&
1897 (rlen > flen ||
1898 rbno + rlen > fbno + flen))) {
1899 xfs_btree_mark_sick(cnt_cur);
1900 error = -EFSCORRUPTED;
1901 goto error0;
1902 }
1903 if (rlen < args->maxlen) {
1904 xfs_agblock_t bestfbno;
1905 xfs_extlen_t bestflen;
1906 xfs_agblock_t bestrbno;
1907 xfs_extlen_t bestrlen;
1908
1909 bestrlen = rlen;
1910 bestrbno = rbno;
1911 bestflen = flen;
1912 bestfbno = fbno;
1913 for (;;) {
1914 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1915 goto error0;
1916 if (i == 0)
1917 break;
1918 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1919 &i)))
1920 goto error0;
1921 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1922 xfs_btree_mark_sick(cnt_cur);
1923 error = -EFSCORRUPTED;
1924 goto error0;
1925 }
1926 if (flen <= bestrlen)
1927 break;
1928 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1929 &rbno, &rlen, &busy_gen);
1930 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1931 if (XFS_IS_CORRUPT(args->mp,
1932 rlen != 0 &&
1933 (rlen > flen ||
1934 rbno + rlen > fbno + flen))) {
1935 xfs_btree_mark_sick(cnt_cur);
1936 error = -EFSCORRUPTED;
1937 goto error0;
1938 }
1939 if (rlen > bestrlen) {
1940 bestrlen = rlen;
1941 bestrbno = rbno;
1942 bestflen = flen;
1943 bestfbno = fbno;
1944 if (rlen == args->maxlen)
1945 break;
1946 }
1947 }
1948 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1949 &i)))
1950 goto error0;
1951 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1952 xfs_btree_mark_sick(cnt_cur);
1953 error = -EFSCORRUPTED;
1954 goto error0;
1955 }
1956 rlen = bestrlen;
1957 rbno = bestrbno;
1958 flen = bestflen;
1959 fbno = bestfbno;
1960 }
1961 args->wasfromfl = 0;
1962 /*
1963 * Fix up the length.
1964 */
1965 args->len = rlen;
1966 if (rlen < args->minlen) {
1967 if (busy) {
1968 /*
1969 * Our only valid extents must have been busy. Flush and
1970 * retry the allocation again. If we get an -EAGAIN
1971 * error, we're being told that a deadlock was avoided
1972 * and the current transaction needs committing before
1973 * the allocation can be retried.
1974 */
1975 trace_xfs_alloc_size_busy(args);
1976 error = xfs_extent_busy_flush(args->tp, args->pag,
1977 busy_gen, alloc_flags);
1978 if (error)
1979 goto error0;
1980
1981 alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;
1982 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1983 goto restart;
1984 }
1985 goto out_nominleft;
1986 }
1987 xfs_alloc_fix_len(args);
1988
1989 rlen = args->len;
1990 if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1991 xfs_btree_mark_sick(cnt_cur);
1992 error = -EFSCORRUPTED;
1993 goto error0;
1994 }
1995 /*
1996 * Allocate and initialize a cursor for the by-block tree.
1997 */
1998 bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp,
1999 args->pag);
2000 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
2001 rbno, rlen, XFSA_FIXUP_CNT_OK)))
2002 goto error0;
2003 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2004 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2005 cnt_cur = bno_cur = NULL;
2006 args->len = rlen;
2007 args->agbno = rbno;
2008 if (XFS_IS_CORRUPT(args->mp,
2009 args->agbno + args->len >
2010 be32_to_cpu(agf->agf_length))) {
2011 xfs_ag_mark_sick(args->pag, XFS_SICK_AG_BNOBT);
2012 error = -EFSCORRUPTED;
2013 goto error0;
2014 }
2015 trace_xfs_alloc_size_done(args);
2016 return 0;
2017
2018 error0:
2019 trace_xfs_alloc_size_error(args);
2020 if (cnt_cur)
2021 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2022 if (bno_cur)
2023 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2024 return error;
2025
2026 out_nominleft:
2027 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2028 trace_xfs_alloc_size_nominleft(args);
2029 args->agbno = NULLAGBLOCK;
2030 return 0;
2031 }
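
/*
 * To illustrate the trimming above with hypothetical numbers: a cntbt record
 * of (fbno 100, flen 20) whose first 8 blocks are busy might be trimmed by
 * xfs_alloc_compute_aligned() to (rbno 108, rlen 12). With args->maxlen of
 * 16 that candidate is too short, so the search continues; the corruption
 * checks only demand rlen <= flen and rbno + rlen <= fbno + flen, both of
 * which hold here.
 */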
2032
2033 /*
2034 * Free the extent starting at agno/bno for length.
2035 */
2036 int
2037 xfs_free_ag_extent(
2038 struct xfs_trans *tp,
2039 struct xfs_buf *agbp,
2040 xfs_agnumber_t agno,
2041 xfs_agblock_t bno,
2042 xfs_extlen_t len,
2043 const struct xfs_owner_info *oinfo,
2044 enum xfs_ag_resv_type type)
2045 {
2046 struct xfs_mount *mp;
2047 struct xfs_btree_cur *bno_cur;
2048 struct xfs_btree_cur *cnt_cur;
2049 xfs_agblock_t gtbno; /* start of right neighbor */
2050 xfs_extlen_t gtlen; /* length of right neighbor */
2051 xfs_agblock_t ltbno; /* start of left neighbor */
2052 xfs_extlen_t ltlen; /* length of left neighbor */
2053 xfs_agblock_t nbno; /* new starting block of freesp */
2054 xfs_extlen_t nlen; /* new length of freespace */
2055 int haveleft; /* have a left neighbor */
2056 int haveright; /* have a right neighbor */
2057 int i;
2058 int error;
2059 struct xfs_perag *pag = agbp->b_pag;
2060 bool fixup_longest = false;
2061
2062 bno_cur = cnt_cur = NULL;
2063 mp = tp->t_mountp;
2064
2065 if (!xfs_rmap_should_skip_owner_update(oinfo)) {
2066 error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
2067 if (error)
2068 goto error0;
2069 }
2070
2071 /*
2072 * Allocate and initialize a cursor for the by-block btree.
2073 */
2074 bno_cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
2075 /*
2076 * Look for a neighboring block on the left (lower block numbers)
2077 * that is contiguous with this space.
2078 */
2079 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
2080 goto error0;
2081 if (haveleft) {
2082 /*
2083 * There is a block to our left.
2084 */
2085 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
2086 goto error0;
2087 if (XFS_IS_CORRUPT(mp, i != 1)) {
2088 xfs_btree_mark_sick(bno_cur);
2089 error = -EFSCORRUPTED;
2090 goto error0;
2091 }
2092 /*
2093 * It's not contiguous, though.
2094 */
2095 if (ltbno + ltlen < bno)
2096 haveleft = 0;
2097 else {
2098 /*
2099 * If this failure happens the request to free this
2100 * space was invalid, it's (partly) already free.
2101 * Very bad.
2102 */
2103 if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
2104 xfs_btree_mark_sick(bno_cur);
2105 error = -EFSCORRUPTED;
2106 goto error0;
2107 }
2108 }
2109 }
2110 /*
2111 * Look for a neighboring block on the right (higher block numbers)
2112 * that is contiguous with this space.
2113 */
2114 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
2115 goto error0;
2116 if (haveright) {
2117 /*
2118 * There is a block to our right.
2119 */
2120 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
2121 goto error0;
2122 if (XFS_IS_CORRUPT(mp, i != 1)) {
2123 xfs_btree_mark_sick(bno_cur);
2124 error = -EFSCORRUPTED;
2125 goto error0;
2126 }
2127 /*
2128 * It's not contiguous, though.
2129 */
2130 if (bno + len < gtbno)
2131 haveright = 0;
2132 else {
2133 /*
2134 * If this failure happens the request to free this
2135 * space was invalid, it's (partly) already free.
2136 * Very bad.
2137 */
2138 if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
2139 xfs_btree_mark_sick(bno_cur);
2140 error = -EFSCORRUPTED;
2141 goto error0;
2142 }
2143 }
2144 }
2145 /*
2146 * Now allocate and initialize a cursor for the by-size tree.
2147 */
2148 cnt_cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
2149 /*
2150 * Have both left and right contiguous neighbors.
2151 * Merge all three into a single free block.
2152 */
2153 if (haveleft && haveright) {
2154 /*
2155 * Delete the old by-size entry on the left.
2156 */
2157 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2158 goto error0;
2159 if (XFS_IS_CORRUPT(mp, i != 1)) {
2160 xfs_btree_mark_sick(cnt_cur);
2161 error = -EFSCORRUPTED;
2162 goto error0;
2163 }
2164 if ((error = xfs_btree_delete(cnt_cur, &i)))
2165 goto error0;
2166 if (XFS_IS_CORRUPT(mp, i != 1)) {
2167 xfs_btree_mark_sick(cnt_cur);
2168 error = -EFSCORRUPTED;
2169 goto error0;
2170 }
2171 /*
2172 * Delete the old by-size entry on the right.
2173 */
2174 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2175 goto error0;
2176 if (XFS_IS_CORRUPT(mp, i != 1)) {
2177 xfs_btree_mark_sick(cnt_cur);
2178 error = -EFSCORRUPTED;
2179 goto error0;
2180 }
2181 if ((error = xfs_btree_delete(cnt_cur, &i)))
2182 goto error0;
2183 if (XFS_IS_CORRUPT(mp, i != 1)) {
2184 xfs_btree_mark_sick(cnt_cur);
2185 error = -EFSCORRUPTED;
2186 goto error0;
2187 }
2188 /*
2189 * Delete the old by-block entry for the right block.
2190 */
2191 if ((error = xfs_btree_delete(bno_cur, &i)))
2192 goto error0;
2193 if (XFS_IS_CORRUPT(mp, i != 1)) {
2194 xfs_btree_mark_sick(bno_cur);
2195 error = -EFSCORRUPTED;
2196 goto error0;
2197 }
2198 /*
2199 * Move the by-block cursor back to the left neighbor.
2200 */
2201 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2202 goto error0;
2203 if (XFS_IS_CORRUPT(mp, i != 1)) {
2204 xfs_btree_mark_sick(bno_cur);
2205 error = -EFSCORRUPTED;
2206 goto error0;
2207 }
2208 #ifdef DEBUG
2209 /*
2210 * Check that this is the right record: delete didn't
2211 * mangle the cursor.
2212 */
2213 {
2214 xfs_agblock_t xxbno;
2215 xfs_extlen_t xxlen;
2216
2217 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
2218 &i)))
2219 goto error0;
2220 if (XFS_IS_CORRUPT(mp,
2221 i != 1 ||
2222 xxbno != ltbno ||
2223 xxlen != ltlen)) {
2224 xfs_btree_mark_sick(bno_cur);
2225 error = -EFSCORRUPTED;
2226 goto error0;
2227 }
2228 }
2229 #endif
2230 /*
2231 * Update remaining by-block entry to the new, joined block.
2232 */
2233 nbno = ltbno;
2234 nlen = len + ltlen + gtlen;
2235 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2236 goto error0;
2237 }
2238 /*
2239 * Have only a left contiguous neighbor.
2240 * Merge it together with the new freespace.
2241 */
2242 else if (haveleft) {
2243 /*
2244 * Delete the old by-size entry on the left.
2245 */
2246 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
2247 goto error0;
2248 if (XFS_IS_CORRUPT(mp, i != 1)) {
2249 xfs_btree_mark_sick(cnt_cur);
2250 error = -EFSCORRUPTED;
2251 goto error0;
2252 }
2253 if ((error = xfs_btree_delete(cnt_cur, &i)))
2254 goto error0;
2255 if (XFS_IS_CORRUPT(mp, i != 1)) {
2256 xfs_btree_mark_sick(cnt_cur);
2257 error = -EFSCORRUPTED;
2258 goto error0;
2259 }
2260 /*
2261 * Back up the by-block cursor to the left neighbor, and
2262 * update its length.
2263 */
2264 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
2265 goto error0;
2266 if (XFS_IS_CORRUPT(mp, i != 1)) {
2267 xfs_btree_mark_sick(bno_cur);
2268 error = -EFSCORRUPTED;
2269 goto error0;
2270 }
2271 nbno = ltbno;
2272 nlen = len + ltlen;
2273 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2274 goto error0;
2275 }
2276 /*
2277 * Have only a right contiguous neighbor.
2278 * Merge it together with the new freespace.
2279 */
2280 else if (haveright) {
2281 /*
2282 * Delete the old by-size entry on the right.
2283 */
2284 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
2285 goto error0;
2286 if (XFS_IS_CORRUPT(mp, i != 1)) {
2287 xfs_btree_mark_sick(cnt_cur);
2288 error = -EFSCORRUPTED;
2289 goto error0;
2290 }
2291 if ((error = xfs_btree_delete(cnt_cur, &i)))
2292 goto error0;
2293 if (XFS_IS_CORRUPT(mp, i != 1)) {
2294 xfs_btree_mark_sick(cnt_cur);
2295 error = -EFSCORRUPTED;
2296 goto error0;
2297 }
2298 /*
2299 * Update the starting block and length of the right
2300 * neighbor in the by-block tree.
2301 */
2302 nbno = bno;
2303 nlen = len + gtlen;
2304 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
2305 goto error0;
2306 }
2307 /*
2308 * No contiguous neighbors.
2309 * Insert the new freespace into the by-block tree.
2310 */
2311 else {
2312 nbno = bno;
2313 nlen = len;
2314 if ((error = xfs_btree_insert(bno_cur, &i)))
2315 goto error0;
2316 if (XFS_IS_CORRUPT(mp, i != 1)) {
2317 xfs_btree_mark_sick(bno_cur);
2318 error = -EFSCORRUPTED;
2319 goto error0;
2320 }
2321 }
2322 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
2323 bno_cur = NULL;
2324
2325 /*
2326 * In all cases we need to insert the new freespace in the by-size tree.
2327 *
2328 * If this new freespace is being inserted in the block that contains
2329 * the largest free space in the btree, make sure we also fix up the
2330 * agf->agf_longest tracker field.
2331 */
2332 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
2333 goto error0;
2334 if (XFS_IS_CORRUPT(mp, i != 0)) {
2335 xfs_btree_mark_sick(cnt_cur);
2336 error = -EFSCORRUPTED;
2337 goto error0;
2338 }
2339 if (xfs_alloc_cursor_at_lastrec(cnt_cur))
2340 fixup_longest = true;
2341 if ((error = xfs_btree_insert(cnt_cur, &i)))
2342 goto error0;
2343 if (XFS_IS_CORRUPT(mp, i != 1)) {
2344 xfs_btree_mark_sick(cnt_cur);
2345 error = -EFSCORRUPTED;
2346 goto error0;
2347 }
2348 if (fixup_longest) {
2349 error = xfs_alloc_fixup_longest(cnt_cur);
2350 if (error)
2351 goto error0;
2352 }
2353
2354 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
2355 cnt_cur = NULL;
2356
2357 /*
2358 * Update the freespace totals in the ag and superblock.
2359 */
2360 error = xfs_alloc_update_counters(tp, agbp, len);
2361 xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
2362 if (error)
2363 goto error0;
2364
2365 XFS_STATS_INC(mp, xs_freex);
2366 XFS_STATS_ADD(mp, xs_freeb, len);
2367
2368 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
2369
2370 return 0;
2371
2372 error0:
2373 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
2374 if (bno_cur)
2375 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
2376 if (cnt_cur)
2377 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
2378 return error;
2379 }
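
/*
 * To illustrate the merge cases above with hypothetical numbers, consider
 * freeing (bno 100, len 10): an existing free record at (90, 10) merges left
 * into (90, 20); one at (110, 5) merges right into (100, 15); both together
 * collapse into a single (90, 25) record; with neither neighbour, (100, 10)
 * is inserted as-is. In every case the resulting record is then inserted
 * into the by-size tree.
 */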
2380
2381 /*
2382 * Visible (exported) allocation/free functions.
2383 * Some of these are used just by xfs_alloc_btree.c and this file.
2384 */
2385
2386 /*
2387 * Compute and fill in value of m_alloc_maxlevels.
2388 */
2389 void
2390 xfs_alloc_compute_maxlevels(
2391 xfs_mount_t *mp) /* file system mount structure */
2392 {
2393 mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2394 (mp->m_sb.sb_agblocks + 1) / 2);
2395 ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2396 }
2397
2398 /*
2399 * Find the length of the longest extent in an AG. The 'need' parameter
2400 * specifies how much space we're going to need for the AGFL and the
2401 * 'reserved' parameter tells us how many blocks in this AG are reserved for
2402 * other callers.
2403 */
2404 xfs_extlen_t
2405 xfs_alloc_longest_free_extent(
2406 struct xfs_perag *pag,
2407 xfs_extlen_t need,
2408 xfs_extlen_t reserved)
2409 {
2410 xfs_extlen_t delta = 0;
2411
2412 /*
2413 * If the AGFL needs a recharge, we'll have to subtract that from the
2414 * longest extent.
2415 */
2416 if (need > pag->pagf_flcount)
2417 delta = need - pag->pagf_flcount;
2418
2419 /*
2420 * If we cannot maintain others' reservations with space from the
2421 * not-longest freesp extents, we'll have to subtract /that/ from
2422 * the longest extent too.
2423 */
2424 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2425 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2426
2427 /*
2428 * If the longest extent is long enough to satisfy all the
2429 * reservations and AGFL rules in place, we can return this extent.
2430 */
2431 if (pag->pagf_longest > delta)
2432 return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2433 pag->pagf_longest - delta);
2434
2435 /* Otherwise, let the caller try for 1 block if there's space. */
2436 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2437 }
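
/*
 * For example, with hypothetical counters pagf_longest == 100,
 * pagf_freeblks == 120, pagf_flcount == 2, need == 6 and reserved == 30:
 * refilling the AGFL costs delta = 6 - 2 = 4, and the non-longest free
 * space (120 - 100 = 20 blocks) falls 10 blocks short of the reservation,
 * so delta grows to 14. The usable longest extent is then 100 - 14 = 86
 * blocks, further capped by m_ag_max_usable.
 */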
2438
2439 /*
2440 * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
2441 * return the largest possible minimum length.
2442 */
2443 unsigned int
2444 xfs_alloc_min_freelist(
2445 struct xfs_mount *mp,
2446 struct xfs_perag *pag)
2447 {
2448 /* AG btrees have at least 1 level. */
2449 const unsigned int bno_level = pag ? pag->pagf_bno_level : 1;
2450 const unsigned int cnt_level = pag ? pag->pagf_cnt_level : 1;
2451 const unsigned int rmap_level = pag ? pag->pagf_rmap_level : 1;
2452 unsigned int min_free;
2453
2454 ASSERT(mp->m_alloc_maxlevels > 0);
2455
2456 /*
2457 * For a btree shorter than the maximum height, the worst case is that
2458 * every level gets split and a new level is added, then while inserting
2459 * another entry to refill the AGFL, every level under the old root gets
2460 * split again. This is:
2461 *
2462 * (full height split reservation) + (AGFL refill split height)
2463 * = (current height + 1) + (current height - 1)
2464 * = (new height) + (new height - 2)
2465 * = 2 * new height - 2
2466 *
2467 * For a btree of maximum height, the worst case is that every level
2468 * under the root gets split, then while inserting another entry to
2469 * refill the AGFL, every level under the root gets split again. This is
2470 * also:
2471 *
2472 * 2 * (current height - 1)
2473 * = 2 * (new height - 1)
2474 * = 2 * new height - 2
2475 */
2476
2477 /* space needed by-bno freespace btree */
2478 min_free = min(bno_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
2479 /* space needed by-size freespace btree */
2480 min_free += min(cnt_level + 1, mp->m_alloc_maxlevels) * 2 - 2;
2481 /* space needed reverse mapping used space btree */
2482 if (xfs_has_rmapbt(mp))
2483 min_free += min(rmap_level + 1, mp->m_rmap_maxlevels) * 2 - 2;
2484 return min_free;
2485 }
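
/*
 * For example, with hypothetical levels bno_level == 2, cnt_level == 2 and
 * m_alloc_maxlevels == 3, each freespace btree needs
 * min(3, 3) * 2 - 2 = 4 blocks, giving min_free == 8; an rmapbt at
 * rmap_level == 3 with m_rmap_maxlevels == 5 would add
 * min(4, 5) * 2 - 2 = 6 more.
 */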
2486
2487 /*
2488 * Check if the operation we are fixing up the freelist for should go ahead or
2489 * not. If we are freeing blocks, we always allow it, otherwise the allocation
2490 * is dependent on whether the size and shape of free space available will
2491 * permit the requested allocation to take place.
2492 */
2493 static bool
2494 xfs_alloc_space_available(
2495 struct xfs_alloc_arg *args,
2496 xfs_extlen_t min_free,
2497 int flags)
2498 {
2499 struct xfs_perag *pag = args->pag;
2500 xfs_extlen_t alloc_len, longest;
2501 xfs_extlen_t reservation; /* blocks that are still reserved */
2502 int available;
2503 xfs_extlen_t agflcount;
2504
2505 if (flags & XFS_ALLOC_FLAG_FREEING)
2506 return true;
2507
2508 reservation = xfs_ag_resv_needed(pag, args->resv);
2509
2510 /* do we have enough contiguous free space for the allocation? */
2511 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2512 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2513 if (longest < alloc_len)
2514 return false;
2515
2516 /*
2517 * Do we have enough free space remaining for the allocation? Don't
2518 * account extra agfl blocks because we are about to defer freeing them,
2519 * making them unavailable until the current transaction commits.
2520 */
2521 agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2522 available = (int)(pag->pagf_freeblks + agflcount -
2523 reservation - min_free - args->minleft);
2524 if (available < (int)max(args->total, alloc_len))
2525 return false;
2526
2527 /*
2528 * Clamp maxlen to the amount of free space available for the actual
2529 * extent allocation.
2530 */
2531 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2532 args->maxlen = available;
2533 ASSERT(args->maxlen > 0);
2534 ASSERT(args->maxlen >= args->minlen);
2535 }
2536
2537 return true;
2538 }
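
/*
 * For example, with hypothetical values pagf_freeblks == 50,
 * pagf_flcount == 3, min_free == 6, reservation == 10, args->minleft == 0
 * and alloc_len == 8: agflcount = min(3, 6) = 3, so
 * available = 50 + 3 - 10 - 6 - 0 = 37, and any request with
 * args->total <= 37 passes the overall check provided the longest-extent
 * check above also passed.
 */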
2539
2540 /*
2541 * Check the agfl fields of the agf for inconsistency or corruption.
2542 *
2543 * The original purpose was to detect an agfl header padding mismatch between
2544 * current and early v5 kernels. This problem manifests as a 1-slot size
2545 * difference between the on-disk flcount and the active [first, last] range of
2546 * a wrapped agfl.
2547 *
2548 * However, we need to use these same checks to catch agfl count corruptions
2549 * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
2550 * way, we need to reset the agfl and warn the user.
2551 *
2552 * Return true if a reset is required before the agfl can be used, false
2553 * otherwise.
2554 */
2555 static bool
2556 xfs_agfl_needs_reset(
2557 struct xfs_mount *mp,
2558 struct xfs_agf *agf)
2559 {
2560 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2561 uint32_t l = be32_to_cpu(agf->agf_fllast);
2562 uint32_t c = be32_to_cpu(agf->agf_flcount);
2563 int agfl_size = xfs_agfl_size(mp);
2564 int active;
2565
2566 /*
2567 * The agf read verifier catches severe corruption of these fields.
2568 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2569 * the verifier allows it.
2570 */
2571 if (f >= agfl_size || l >= agfl_size)
2572 return true;
2573 if (c > agfl_size)
2574 return true;
2575
2576 /*
2577 * Check consistency between the on-disk count and the active range. An
2578 * agfl padding mismatch manifests as an inconsistent flcount.
2579 */
2580 if (c && l >= f)
2581 active = l - f + 1;
2582 else if (c)
2583 active = agfl_size - f + l + 1;
2584 else
2585 active = 0;
2586
2587 return active != c;
2588 }
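
/*
 * For example, with a hypothetical agfl_size of 118: flfirst == 115,
 * fllast == 2 and flcount == 6 describe a wrapped but consistent list,
 * since active = 118 - 115 + 2 + 1 = 6. The padding mismatch described
 * above shows up as an flcount that is off by one for the same active
 * range, and this function would then demand a reset.
 */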
2589
2590 /*
2591 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2592 * agfl content cannot be trusted. Warn the user that a repair is required to
2593 * recover leaked blocks.
2594 *
2595 * The purpose of this mechanism is to handle filesystems affected by the agfl
2596 * header padding mismatch problem. A reset keeps the filesystem online with a
2597 * relatively minor free space accounting inconsistency rather than suffer the
2598 * inevitable crash from use of an invalid agfl block.
2599 */
2600 static void
2601 xfs_agfl_reset(
2602 struct xfs_trans *tp,
2603 struct xfs_buf *agbp,
2604 struct xfs_perag *pag)
2605 {
2606 struct xfs_mount *mp = tp->t_mountp;
2607 struct xfs_agf *agf = agbp->b_addr;
2608
2609 ASSERT(xfs_perag_agfl_needs_reset(pag));
2610 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2611
2612 xfs_warn(mp,
2613 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2614 "Please unmount and run xfs_repair.",
2615 pag->pag_agno, pag->pagf_flcount);
2616
2617 agf->agf_flfirst = 0;
2618 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2619 agf->agf_flcount = 0;
2620 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2621 XFS_AGF_FLCOUNT);
2622
2623 pag->pagf_flcount = 0;
2624 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
2625 }
2626
2627 /*
2628 * Add the extent to the list of extents to be freed at transaction end.
2629 * The list is maintained sorted (by block number).
2630 */
2631 static int
2632 xfs_defer_extent_free(
2633 struct xfs_trans *tp,
2634 xfs_fsblock_t bno,
2635 xfs_filblks_t len,
2636 const struct xfs_owner_info *oinfo,
2637 enum xfs_ag_resv_type type,
2638 unsigned int free_flags,
2639 struct xfs_defer_pending **dfpp)
2640 {
2641 struct xfs_extent_free_item *xefi;
2642 struct xfs_mount *mp = tp->t_mountp;
2643
2644 ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
2645 ASSERT(!isnullstartblock(bno));
2646 ASSERT(!(free_flags & ~XFS_FREE_EXTENT_ALL_FLAGS));
2647
2648 if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
2649 return -EFSCORRUPTED;
2650
2651 xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
2652 GFP_KERNEL | __GFP_NOFAIL);
2653 xefi->xefi_startblock = bno;
2654 xefi->xefi_blockcount = (xfs_extlen_t)len;
2655 xefi->xefi_agresv = type;
2656 if (free_flags & XFS_FREE_EXTENT_SKIP_DISCARD)
2657 xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD;
2658 if (oinfo) {
2659 ASSERT(oinfo->oi_offset == 0);
2660
2661 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2662 xefi->xefi_flags |= XFS_EFI_ATTR_FORK;
2663 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2664 xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK;
2665 xefi->xefi_owner = oinfo->oi_owner;
2666 } else {
2667 xefi->xefi_owner = XFS_RMAP_OWN_NULL;
2668 }
2669
2670 xfs_extent_free_defer_add(tp, xefi, dfpp);
2671 return 0;
2672 }
2673
2674 int
2675 xfs_free_extent_later(
2676 struct xfs_trans *tp,
2677 xfs_fsblock_t bno,
2678 xfs_filblks_t len,
2679 const struct xfs_owner_info *oinfo,
2680 enum xfs_ag_resv_type type,
2681 unsigned int free_flags)
2682 {
2683 struct xfs_defer_pending *dontcare = NULL;
2684
2685 return xfs_defer_extent_free(tp, bno, len, oinfo, type, free_flags,
2686 &dontcare);
2687 }
2688
2689 /*
2690 * Set up automatic freeing of unwritten space in the filesystem.
2691 *
2692 * This function attaches a paused deferred extent free item to the
2693 * transaction. Pausing means that the EFI will be logged in the next
2694 * transaction commit, but the pending EFI will not be finished until the
2695 * pending item is unpaused.
2696 *
2697 * If the system goes down after the EFI has been persisted to the log but
2698 * before the pending item is unpaused, log recovery will find the EFI, fail to
2699 * find the EFD, and free the space.
2700 *
2701 * If the pending item is unpaused, the next transaction commit will log an EFD
2702 * without freeing the space.
2703 *
2704 * Caller must ensure that the tp, fsbno, len, oinfo, and resv flags of the
2705 * @args structure are set to the relevant values.
2706 */
2707 int
2708 xfs_alloc_schedule_autoreap(
2709 const struct xfs_alloc_arg *args,
2710 unsigned int free_flags,
2711 struct xfs_alloc_autoreap *aarp)
2712 {
2713 int error;
2714
2715 error = xfs_defer_extent_free(args->tp, args->fsbno, args->len,
2716 &args->oinfo, args->resv, free_flags, &aarp->dfp);
2717 if (error)
2718 return error;
2719
2720 xfs_defer_item_pause(args->tp, aarp->dfp);
2721 return 0;
2722 }
2723
2724 /*
2725 * Cancel automatic freeing of unwritten space in the filesystem.
2726 *
2727 * Earlier, we created a paused deferred extent free item and attached it to
2728 * this transaction so that we could automatically roll back a new space
2729 * allocation if the system went down. Now we want to cancel the paused work
2730 * item by marking the EFI stale so we don't actually free the space, unpausing
2731 * the pending item and logging an EFD.
2732 *
2733 * The caller generally should have already mapped the space into the ondisk
2734 * filesystem. If the reserved space was partially used, the caller must call
2735 * xfs_free_extent_later to create a new EFI to free the unused space.
2736 */
2737 void
2738 xfs_alloc_cancel_autoreap(
2739 struct xfs_trans *tp,
2740 struct xfs_alloc_autoreap *aarp)
2741 {
2742 struct xfs_defer_pending *dfp = aarp->dfp;
2743 struct xfs_extent_free_item *xefi;
2744
2745 if (!dfp)
2746 return;
2747
2748 list_for_each_entry(xefi, &dfp->dfp_work, xefi_list)
2749 xefi->xefi_flags |= XFS_EFI_CANCELLED;
2750
2751 xfs_defer_item_unpause(tp, dfp);
2752 }
2753
2754 /*
2755 * Commit automatic freeing of unwritten space in the filesystem.
2756 *
2757 * This unpauses an earlier _schedule_autoreap and commits to freeing the
2758 * allocated space. Call this if none of the reserved space was used.
2759 */
2760 void
2761 xfs_alloc_commit_autoreap(
2762 struct xfs_trans *tp,
2763 struct xfs_alloc_autoreap *aarp)
2764 {
2765 if (aarp->dfp)
2766 xfs_defer_item_unpause(tp, aarp->dfp);
2767 }
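
/*
 * Putting the three autoreap calls together, a caller might use them like
 * this (an illustrative sketch; transaction setup, rolling and error
 * handling are elided):
 *
 *	struct xfs_alloc_autoreap aarp = { };
 *
 *	error = xfs_alloc_schedule_autoreap(args, 0, &aarp);
 *	...map the new space into the ondisk filesystem...
 *	if (mapped)
 *		xfs_alloc_cancel_autoreap(tp, &aarp);
 *	else
 *		xfs_alloc_commit_autoreap(tp, &aarp);
 */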
2768
2769 /*
2770 * Check if an AGF has a free extent record whose length is equal to
2771 * args->minlen.
2772 */
2773 STATIC int
2774 xfs_exact_minlen_extent_available(
2775 struct xfs_alloc_arg *args,
2776 struct xfs_buf *agbp,
2777 int *stat)
2778 {
2779 struct xfs_btree_cur *cnt_cur;
2780 xfs_agblock_t fbno;
2781 xfs_extlen_t flen;
2782 int error = 0;
2783
2784 cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, agbp,
2785 args->pag);
2786 error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2787 if (error)
2788 goto out;
2789
2790 if (*stat == 0) {
2791 xfs_btree_mark_sick(cnt_cur);
2792 error = -EFSCORRUPTED;
2793 goto out;
2794 }
2795
2796 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
2797 if (error)
2798 goto out;
2799
2800 if (*stat == 1 && flen != args->minlen)
2801 *stat = 0;
2802
2803 out:
2804 xfs_btree_del_cursor(cnt_cur, error);
2805
2806 return error;
2807 }
2808
2809 /*
2810 * Decide whether to use this allocation group for this allocation.
2811 * If so, fix up the btree freelist's size.
2812 */
2813 int /* error */
2814 xfs_alloc_fix_freelist(
2815 struct xfs_alloc_arg *args, /* allocation argument structure */
2816 uint32_t alloc_flags)
2817 {
2818 struct xfs_mount *mp = args->mp;
2819 struct xfs_perag *pag = args->pag;
2820 struct xfs_trans *tp = args->tp;
2821 struct xfs_buf *agbp = NULL;
2822 struct xfs_buf *agflbp = NULL;
2823 struct xfs_alloc_arg targs; /* local allocation arguments */
2824 xfs_agblock_t bno; /* freelist block */
2825 xfs_extlen_t need; /* total blocks needed in freelist */
2826 int error = 0;
2827
2828 /* deferred ops (AGFL block frees) require permanent transactions */
2829 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2830
2831 if (!xfs_perag_initialised_agf(pag)) {
2832 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2833 if (error) {
2834 /* Couldn't lock the AGF so skip this AG. */
2835 if (error == -EAGAIN)
2836 error = 0;
2837 goto out_no_agbp;
2838 }
2839 }
2840
2841 /*
2842 * If this is a metadata-preferred pag and we are allocating user data,
2843 * then try somewhere else, unless we are being asked to try harder at
2844 * this point.
2845 */
2846 if (xfs_perag_prefers_metadata(pag) &&
2847 (args->datatype & XFS_ALLOC_USERDATA) &&
2848 (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2849 ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
2850 goto out_agbp_relse;
2851 }
2852
2853 need = xfs_alloc_min_freelist(mp, pag);
2854 if (!xfs_alloc_space_available(args, need, alloc_flags |
2855 XFS_ALLOC_FLAG_CHECK))
2856 goto out_agbp_relse;
2857
2858 /*
2859 * Get the a.g. freespace buffer.
2860 * Can fail if we're not blocking on locks, and it's held.
2861 */
2862 if (!agbp) {
2863 error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
2864 if (error) {
2865 /* Couldn't lock the AGF so skip this AG. */
2866 if (error == -EAGAIN)
2867 error = 0;
2868 goto out_no_agbp;
2869 }
2870 }
2871
2872 /* reset a padding mismatched agfl before final free space check */
2873 if (xfs_perag_agfl_needs_reset(pag))
2874 xfs_agfl_reset(tp, agbp, pag);
2875
2876 /* If there isn't enough total space or a long enough single extent, reject it. */
2877 need = xfs_alloc_min_freelist(mp, pag);
2878 if (!xfs_alloc_space_available(args, need, alloc_flags))
2879 goto out_agbp_relse;
2880
2881 if (IS_ENABLED(CONFIG_XFS_DEBUG) && args->alloc_minlen_only) {
2882 int stat;
2883
2884 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2885 if (error || !stat)
2886 goto out_agbp_relse;
2887 }
2888
2889 /*
2890 * Make the freelist shorter if it's too long.
2891 *
2892 * Note that from this point onwards, we will always release the agf and
2893 * agfl buffers on error. This handles the case where we error out and
2894 * the buffers are clean or may not have been joined to the transaction
2895 * and hence need to be released manually. If they have been joined to
2896 * the transaction, then xfs_trans_brelse() will handle them
2897 * appropriately based on the recursion count and dirty state of the
2898 * buffer.
2899 *
2900 * XXX (dgc): When we have lots of free space, does this buy us
2901 * anything other than extra overhead when we need to put more blocks
2902 * back on the free list? Maybe we should only do this when space is
2903 * getting low or the AGFL is more than half full?
2904 *
2905 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2906 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2907 * updating the rmapbt. Both flags are used in xfs_repair while we're
2908 * rebuilding the rmapbt, and neither are used by the kernel. They're
2909 * both required to ensure that rmaps are correctly recorded for the
2910 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
2911 * repair/rmap.c in xfsprogs for details.
2912 */
2913 memset(&targs, 0, sizeof(targs));
2914 /* struct copy below */
2915 if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
2916 targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2917 else
2918 targs.oinfo = XFS_RMAP_OINFO_AG;
2919 while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
2920 pag->pagf_flcount > need) {
2921 error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
2922 if (error)
2923 goto out_agbp_relse;
2924
2925 /*
2926 * Defer the AGFL block free.
2927 *
2928 * This helps to prevent log reservation overruns due to too
2929 * many allocation operations in a transaction. AGFL frees are
2930 * prone to this problem because for one they are always freed
2931 * one at a time. Further, an immediate AGFL block free can
2932 * cause a btree join and require another block free before the
2933 * real allocation can proceed.
2934 * Deferring the free disconnects freeing up the AGFL slot from
2935 * freeing the block.
2936 */
2937 error = xfs_free_extent_later(tp,
2938 XFS_AGB_TO_FSB(mp, args->agno, bno), 1,
2939 &targs.oinfo, XFS_AG_RESV_AGFL, 0);
2940 if (error)
2941 goto out_agbp_relse;
2942 }
2943
2944 targs.tp = tp;
2945 targs.mp = mp;
2946 targs.agbp = agbp;
2947 targs.agno = args->agno;
2948 targs.alignment = targs.minlen = targs.prod = 1;
2949 targs.pag = pag;
2950 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
2951 if (error)
2952 goto out_agbp_relse;
2953
2954 /* Make the freelist longer if it's too short. */
2955 while (pag->pagf_flcount < need) {
2956 targs.agbno = 0;
2957 targs.maxlen = need - pag->pagf_flcount;
2958 targs.resv = XFS_AG_RESV_AGFL;
2959
2960 /* Allocate as many blocks as possible at once. */
2961 error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
2962 if (error)
2963 goto out_agflbp_relse;
2964
2965 /*
2966 * Stop if we run out. Won't happen if callers are obeying
2967 * the restrictions correctly. Can happen for free calls
2968 * on a completely full ag.
2969 */
2970 if (targs.agbno == NULLAGBLOCK) {
2971 if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
2972 break;
2973 goto out_agflbp_relse;
2974 }
2975
2976 if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) {
2977 error = xfs_rmap_alloc(tp, agbp, pag,
2978 targs.agbno, targs.len, &targs.oinfo);
2979 if (error)
2980 goto out_agflbp_relse;
2981 }
2982 error = xfs_alloc_update_counters(tp, agbp,
2983 -((long)(targs.len)));
2984 if (error)
2985 goto out_agflbp_relse;
2986
2987 /*
2988 * Put each allocated block on the list.
2989 */
2990 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2991 error = xfs_alloc_put_freelist(pag, tp, agbp,
2992 agflbp, bno, 0);
2993 if (error)
2994 goto out_agflbp_relse;
2995 }
2996 }
2997 xfs_trans_brelse(tp, agflbp);
2998 args->agbp = agbp;
2999 return 0;
3000
3001 out_agflbp_relse:
3002 xfs_trans_brelse(tp, agflbp);
3003 out_agbp_relse:
3004 if (agbp)
3005 xfs_trans_brelse(tp, agbp);
3006 out_no_agbp:
3007 args->agbp = NULL;
3008 return error;
3009 }
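
/*
 * For example, if xfs_alloc_min_freelist() reports a hypothetical need of 6:
 * a pagf_flcount of 9 makes the first loop pull three blocks off the list
 * and defer freeing them, while a pagf_flcount of 4 makes the second loop
 * allocate up to two blocks by size and put them back on the list one at a
 * time.
 */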
3010
3011 /*
3012 * Get a block from the freelist.
3013 * Returns with the buffer for the block gotten.
3014 */
3015 int
3016 xfs_alloc_get_freelist(
3017 struct xfs_perag *pag,
3018 struct xfs_trans *tp,
3019 struct xfs_buf *agbp,
3020 xfs_agblock_t *bnop,
3021 int btreeblk)
3022 {
3023 struct xfs_agf *agf = agbp->b_addr;
3024 struct xfs_buf *agflbp;
3025 xfs_agblock_t bno;
3026 __be32 *agfl_bno;
3027 int error;
3028 uint32_t logflags;
3029 struct xfs_mount *mp = tp->t_mountp;
3030
3031 /*
3032 * Freelist is empty, give up.
3033 */
3034 if (!agf->agf_flcount) {
3035 *bnop = NULLAGBLOCK;
3036 return 0;
3037 }
3038 /*
3039 * Read the array of free blocks.
3040 */
3041 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3042 if (error)
3043 return error;
3044
3045
3046 /*
3047 * Get the block number and update the data structures.
3048 */
3049 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3050 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
3051 if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
3052 return -EFSCORRUPTED;
3053
3054 be32_add_cpu(&agf->agf_flfirst, 1);
3055 xfs_trans_brelse(tp, agflbp);
3056 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
3057 agf->agf_flfirst = 0;
3058
3059 ASSERT(!xfs_perag_agfl_needs_reset(pag));
3060 be32_add_cpu(&agf->agf_flcount, -1);
3061 pag->pagf_flcount--;
3062
3063 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
3064 if (btreeblk) {
3065 be32_add_cpu(&agf->agf_btreeblks, 1);
3066 pag->pagf_btreeblks++;
3067 logflags |= XFS_AGF_BTREEBLKS;
3068 }
3069
3070 xfs_alloc_log_agf(tp, agbp, logflags);
3071 *bnop = bno;
3072
3073 return 0;
3074 }
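
/*
 * The AGFL is consumed as a circular buffer. With a hypothetical
 * xfs_agfl_size() of 118 and agf_flfirst == 117, the block in slot 117 is
 * handed out, flfirst is incremented to 118, and the wrap check above
 * resets it to slot 0.
 */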
3075
3076 /*
3077 * Log the given fields from the agf structure.
3078 */
3079 void
3080 xfs_alloc_log_agf(
3081 struct xfs_trans *tp,
3082 struct xfs_buf *bp,
3083 uint32_t fields)
3084 {
3085 int first; /* first byte offset */
3086 int last; /* last byte offset */
3087 static const short offsets[] = {
3088 offsetof(xfs_agf_t, agf_magicnum),
3089 offsetof(xfs_agf_t, agf_versionnum),
3090 offsetof(xfs_agf_t, agf_seqno),
3091 offsetof(xfs_agf_t, agf_length),
3092 offsetof(xfs_agf_t, agf_bno_root), /* also cnt/rmap root */
3093 offsetof(xfs_agf_t, agf_bno_level), /* also cnt/rmap levels */
3094 offsetof(xfs_agf_t, agf_flfirst),
3095 offsetof(xfs_agf_t, agf_fllast),
3096 offsetof(xfs_agf_t, agf_flcount),
3097 offsetof(xfs_agf_t, agf_freeblks),
3098 offsetof(xfs_agf_t, agf_longest),
3099 offsetof(xfs_agf_t, agf_btreeblks),
3100 offsetof(xfs_agf_t, agf_uuid),
3101 offsetof(xfs_agf_t, agf_rmap_blocks),
3102 offsetof(xfs_agf_t, agf_refcount_blocks),
3103 offsetof(xfs_agf_t, agf_refcount_root),
3104 offsetof(xfs_agf_t, agf_refcount_level),
3105 /* needed so that we don't log the whole rest of the structure: */
3106 offsetof(xfs_agf_t, agf_spare64),
3107 sizeof(xfs_agf_t)
3108 };
3109
3110 trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
3111
3112 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
3113
3114 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
3115 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
3116 }
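
/*
 * xfs_btree_offsets() turns the fields bitmask into one contiguous byte
 * range using the table above. For example, logging
 * XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT dirties everything from
 * offsetof(xfs_agf_t, agf_flfirst) through
 * offsetof(xfs_agf_t, agf_freeblks) - 1, which includes the untouched
 * agf_fllast field sitting between them.
 */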
3117
3118 /*
3119 * Put the block on the freelist for the allocation group.
3120 */
3121 int
3122 xfs_alloc_put_freelist(
3123 struct xfs_perag *pag,
3124 struct xfs_trans *tp,
3125 struct xfs_buf *agbp,
3126 struct xfs_buf *agflbp,
3127 xfs_agblock_t bno,
3128 int btreeblk)
3129 {
3130 struct xfs_mount *mp = tp->t_mountp;
3131 struct xfs_agf *agf = agbp->b_addr;
3132 __be32 *blockp;
3133 int error;
3134 uint32_t logflags;
3135 __be32 *agfl_bno;
3136 int startoff;
3137
3138 if (!agflbp) {
3139 error = xfs_alloc_read_agfl(pag, tp, &agflbp);
3140 if (error)
3141 return error;
3142 }
3143
3144 be32_add_cpu(&agf->agf_fllast, 1);
3145 if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
3146 agf->agf_fllast = 0;
3147
3148 ASSERT(!xfs_perag_agfl_needs_reset(pag));
3149 be32_add_cpu(&agf->agf_flcount, 1);
3150 pag->pagf_flcount++;
3151
3152 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
3153 if (btreeblk) {
3154 be32_add_cpu(&agf->agf_btreeblks, -1);
3155 pag->pagf_btreeblks--;
3156 logflags |= XFS_AGF_BTREEBLKS;
3157 }
3158
3159 xfs_alloc_log_agf(tp, agbp, logflags);
3160
3161 ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
3162
3163 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3164 blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
3165 *blockp = cpu_to_be32(bno);
3166 startoff = (char *)blockp - (char *)agflbp->b_addr;
3167
3168 xfs_alloc_log_agf(tp, agbp, logflags);
3169
3170 xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
3171 xfs_trans_log_buf(tp, agflbp, startoff,
3172 startoff + sizeof(xfs_agblock_t) - 1);
3173 return 0;
3174 }
3175
3176 /*
3177 * Check that this AGF/AGI header's sequence number and length matches the AG
3178 * number and size in fsblocks.
3179 */
3180 xfs_failaddr_t
3181 xfs_validate_ag_length(
3182 struct xfs_buf *bp,
3183 uint32_t seqno,
3184 uint32_t length)
3185 {
3186 struct xfs_mount *mp = bp->b_mount;
3187 /*
3188 * During growfs operations, the perag is not fully initialised,
3189 * so we can't use it for any useful checking. growfs ensures we can't
3190 * use it by using uncached buffers that don't have the perag attached
3191 * so we can detect and avoid this problem.
3192 */
3193 if (bp->b_pag && seqno != bp->b_pag->pag_agno)
3194 return __this_address;
3195
3196 /*
3197 * Only the last AG in the filesystem is allowed to be shorter
3198 * than the AG size recorded in the superblock.
3199 */
3200 if (length != mp->m_sb.sb_agblocks) {
3201 /*
3202 * During growfs, the new last AG can get here before we
3203 * have updated the superblock. Give it a pass on the seqno
3204 * check.
3205 */
3206 if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1)
3207 return __this_address;
3208 if (length < XFS_MIN_AG_BLOCKS)
3209 return __this_address;
3210 if (length > mp->m_sb.sb_agblocks)
3211 return __this_address;
3212 }
3213
3214 return NULL;
3215 }
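
/*
 * For example, on a hypothetical filesystem with sb_agblocks == 1000 and
 * sb_agcount == 4, an AGF claiming a length of 800 verifies only if it
 * belongs to AG 3 and 800 >= XFS_MIN_AG_BLOCKS; every other AG must report
 * exactly 1000 blocks.
 */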
3216
3217 /*
3218 * Verify the AGF is consistent.
3219 *
3220 * We do not verify the AGFL indexes in the AGF are fully consistent here
3221 * because of issues with variable on-disk structure sizes. Instead, we check
3222 * the agfl indexes for consistency when we initialise the perag from the AGF
3223 * information after a read completes.
3224 *
3225 * If the index is inconsistent, then we mark the perag as needing an AGFL
3226 * reset. The first AGFL update performed then resets the AGFL indexes and
3227 * refills the AGFL with known good free blocks, allowing the filesystem to
3228 * continue operating normally at the cost of a few leaked free space blocks.
3229 */
3230 static xfs_failaddr_t
3231 xfs_agf_verify(
3232 struct xfs_buf *bp)
3233 {
3234 struct xfs_mount *mp = bp->b_mount;
3235 struct xfs_agf *agf = bp->b_addr;
3236 xfs_failaddr_t fa;
3237 uint32_t agf_seqno = be32_to_cpu(agf->agf_seqno);
3238 uint32_t agf_length = be32_to_cpu(agf->agf_length);
3239
3240 if (xfs_has_crc(mp)) {
3241 if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
3242 return __this_address;
3243 if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
3244 return __this_address;
3245 }
3246
3247 if (!xfs_verify_magic(bp, agf->agf_magicnum))
3248 return __this_address;
3249
3250 if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)))
3251 return __this_address;
3252
3253 /*
3254 * Both agf_seqno and agf_length need to be validated before any other
3255 * block-number-related fields in the AGF or AGFL can be checked.
3256 */
3257 fa = xfs_validate_ag_length(bp, agf_seqno, agf_length);
3258 if (fa)
3259 return fa;
3260
3261 if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp))
3262 return __this_address;
3263 if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp))
3264 return __this_address;
3265 if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp))
3266 return __this_address;
3267
3268 if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
3269 be32_to_cpu(agf->agf_freeblks) > agf_length)
3270 return __this_address;
3271
3272 if (be32_to_cpu(agf->agf_bno_level) < 1 ||
3273 be32_to_cpu(agf->agf_cnt_level) < 1 ||
3274 be32_to_cpu(agf->agf_bno_level) > mp->m_alloc_maxlevels ||
3275 be32_to_cpu(agf->agf_cnt_level) > mp->m_alloc_maxlevels)
3276 return __this_address;
3277
3278 if (xfs_has_lazysbcount(mp) &&
3279 be32_to_cpu(agf->agf_btreeblks) > agf_length)
3280 return __this_address;
3281
3282 if (xfs_has_rmapbt(mp)) {
3283 if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length)
3284 return __this_address;
3285
3286 if (be32_to_cpu(agf->agf_rmap_level) < 1 ||
3287 be32_to_cpu(agf->agf_rmap_level) > mp->m_rmap_maxlevels)
3288 return __this_address;
3289 }
3290
3291 if (xfs_has_reflink(mp)) {
3292 if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length)
3293 return __this_address;
3294
3295 if (be32_to_cpu(agf->agf_refcount_level) < 1 ||
3296 be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels)
3297 return __this_address;
3298 }
3299
3300 return NULL;
3301 }
3302
3303 static void
3304 xfs_agf_read_verify(
3305 struct xfs_buf *bp)
3306 {
3307 struct xfs_mount *mp = bp->b_mount;
3308 xfs_failaddr_t fa;
3309
3310 if (xfs_has_crc(mp) &&
3311 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
3312 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
3313 else {
3314 fa = xfs_agf_verify(bp);
3315 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
3316 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3317 }
3318 }
3319
3320 static void
3321 xfs_agf_write_verify(
3322 struct xfs_buf *bp)
3323 {
3324 struct xfs_mount *mp = bp->b_mount;
3325 struct xfs_buf_log_item *bip = bp->b_log_item;
3326 struct xfs_agf *agf = bp->b_addr;
3327 xfs_failaddr_t fa;
3328
3329 fa = xfs_agf_verify(bp);
3330 if (fa) {
3331 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3332 return;
3333 }
3334
3335 if (!xfs_has_crc(mp))
3336 return;
3337
3338 if (bip)
3339 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3340
3341 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3342 }
3343
3344 const struct xfs_buf_ops xfs_agf_buf_ops = {
3345 .name = "xfs_agf",
3346 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
3347 .verify_read = xfs_agf_read_verify,
3348 .verify_write = xfs_agf_write_verify,
3349 .verify_struct = xfs_agf_verify,
3350 };
3351
3352 /*
3353 * Read in the allocation group header (free/alloc section).
3354 */
3355 int
3356 xfs_read_agf(
3357 struct xfs_perag *pag,
3358 struct xfs_trans *tp,
3359 int flags,
3360 struct xfs_buf **agfbpp)
3361 {
3362 struct xfs_mount *mp = pag->pag_mount;
3363 int error;
3364
3365 trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
3366
3367 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3368 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
3369 XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3370 if (xfs_metadata_is_sick(error))
3371 xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
3372 if (error)
3373 return error;
3374
3375 xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3376 return 0;
3377 }
3378
3379 /*
3380 * Read in the allocation group header (free/alloc section) and initialise the
3381 * perag structure if necessary. If the caller provides @agfbpp, then return the
3382 * locked buffer to the caller, otherwise release it.
3383 */
3384 int
3385 xfs_alloc_read_agf(
3386 struct xfs_perag *pag,
3387 struct xfs_trans *tp,
3388 int flags,
3389 struct xfs_buf **agfbpp)
3390 {
3391 struct xfs_buf *agfbp;
3392 struct xfs_agf *agf;
3393 int error;
3394 int allocbt_blks;
3395
3396 trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);
3397
3398 /* We don't support trylock when freeing. */
3399 ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
3400 (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
3401 error = xfs_read_agf(pag, tp,
3402 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
3403 &agfbp);
3404 if (error)
3405 return error;
3406
3407 agf = agfbp->b_addr;
3408 if (!xfs_perag_initialised_agf(pag)) {
3409 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
3410 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
3411 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
3412 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
3413 pag->pagf_bno_level = be32_to_cpu(agf->agf_bno_level);
3414 pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level);
3415 pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level);
3416 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
3417 if (xfs_agfl_needs_reset(pag->pag_mount, agf))
3418 set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3419 else
3420 clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
3421
3422 /*
3423 * Update the in-core allocbt counter. Filter out the rmapbt
3424 * subset of the btreeblks counter because the rmapbt is managed
3425 * by perag reservation. Subtract one for the rmapbt root block
3426 * because the rmap counter includes it while the btreeblks
3427 * counter only tracks non-root blocks.
3428 */
3429 allocbt_blks = pag->pagf_btreeblks;
3430 if (xfs_has_rmapbt(pag->pag_mount))
3431 allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
3432 if (allocbt_blks > 0)
3433 atomic64_add(allocbt_blks,
3434 &pag->pag_mount->m_allocbt_blks);
3435
3436 set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate);
3437 }
3438 #ifdef DEBUG
3439 else if (!xfs_is_shutdown(pag->pag_mount)) {
3440 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
3441 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
3442 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
3443 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
3444 ASSERT(pag->pagf_bno_level == be32_to_cpu(agf->agf_bno_level));
3445 ASSERT(pag->pagf_cnt_level == be32_to_cpu(agf->agf_cnt_level));
3446 }
3447 #endif
3448 if (agfbpp)
3449 *agfbpp = agfbp;
3450 else
3451 xfs_trans_brelse(tp, agfbp);
3452 return 0;
3453 }
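
/*
 * To illustrate the allocbt accounting above with hypothetical numbers:
 * pagf_btreeblks == 10 with agf_rmap_blocks == 4 attributes
 * allocbt_blks = 10 - (4 - 1) = 7 blocks to the two freespace btrees,
 * and that is what gets added to m_allocbt_blks.
 */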
3454
3455 /*
3456 * Pre-process allocation arguments to set initial state that we don't require
3457 * callers to set up correctly, as well as bounds check the allocation args
3458 * that are set up.
3459 */
3460 static int
3461 xfs_alloc_vextent_check_args(
3462 struct xfs_alloc_arg *args,
3463 xfs_fsblock_t target,
3464 xfs_agnumber_t *minimum_agno)
3465 {
3466 struct xfs_mount *mp = args->mp;
3467 xfs_agblock_t agsize;
3468
3469 args->fsbno = NULLFSBLOCK;
3470
3471 *minimum_agno = 0;
3472 if (args->tp->t_highest_agno != NULLAGNUMBER)
3473 *minimum_agno = args->tp->t_highest_agno;
3474
3475 /*
3476 * Just fix this up, for the case where the last a.g. is shorter
3477 * (or there's only one a.g.) and the caller couldn't easily figure
3478 * that out (xfs_bmap_alloc).
3479 */
3480 agsize = mp->m_sb.sb_agblocks;
3481 if (args->maxlen > agsize)
3482 args->maxlen = agsize;
3483 if (args->alignment == 0)
3484 args->alignment = 1;
3485
3486 ASSERT(args->minlen > 0);
3487 ASSERT(args->maxlen > 0);
3488 ASSERT(args->alignment > 0);
3489 ASSERT(args->resv != XFS_AG_RESV_AGFL);
3490
3491 ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount);
3492 ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize);
3493 ASSERT(args->minlen <= args->maxlen);
3494 ASSERT(args->minlen <= agsize);
3495 ASSERT(args->mod < args->prod);
3496
3497 if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount ||
3498 XFS_FSB_TO_AGBNO(mp, target) >= agsize ||
3499 args->minlen > args->maxlen || args->minlen > agsize ||
3500 args->mod >= args->prod) {
3501 trace_xfs_alloc_vextent_badargs(args);
3502 return -ENOSPC;
3503 }
3504
3505 if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
3506 trace_xfs_alloc_vextent_skip_deadlock(args);
3507 return -ENOSPC;
3508 }
3509 return 0;
3510
3511 }
3512
3513 /*
3514 * Prepare an AG for allocation. If the AG is not prepared to accept the
3515 * allocation, return failure.
3516 *
3517 * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are
3518 * modified to hold their own perag references.
3519 */
3520 static int
3521 xfs_alloc_vextent_prepare_ag(
3522 struct xfs_alloc_arg *args,
3523 uint32_t alloc_flags)
3524 {
3525 bool need_pag = !args->pag;
3526 int error;
3527
3528 if (need_pag)
3529 args->pag = xfs_perag_get(args->mp, args->agno);
3530
3531 args->agbp = NULL;
3532 error = xfs_alloc_fix_freelist(args, alloc_flags);
3533 if (error) {
3534 trace_xfs_alloc_vextent_nofix(args);
3535 if (need_pag)
3536 xfs_perag_put(args->pag);
3537 args->agbno = NULLAGBLOCK;
3538 return error;
3539 }
3540 if (!args->agbp) {
3541 /* cannot allocate in this AG at all */
3542 trace_xfs_alloc_vextent_noagbp(args);
3543 args->agbno = NULLAGBLOCK;
3544 return 0;
3545 }
3546 args->wasfromfl = 0;
3547 return 0;
3548 }
3549
3550 /*
3551 * Post-process allocation results to account for the allocation if it succeeded
3552 * and set the allocated block number correctly for the caller.
3553 *
3554 * XXX: we should really be returning ENOSPC for ENOSPC, not
3555 * hiding it behind a "successful" NULLFSBLOCK allocation.
3556 */
3557 static int
3558 xfs_alloc_vextent_finish(
3559 struct xfs_alloc_arg *args,
3560 xfs_agnumber_t minimum_agno,
3561 int alloc_error,
3562 bool drop_perag)
3563 {
3564 struct xfs_mount *mp = args->mp;
3565 int error = 0;
3566
3567 /*
3568 * We can end up here with a locked AGF. If we failed, the caller is
3569 * likely going to try to allocate again with different parameters, and
3570 * that can widen the AGs that are searched for free space. If we have
3571 * to do BMBT block allocation, we have to do a new allocation.
3572 *
3573 * Hence leaving this function with the AGF locked opens up potential
3574 * ABBA AGF deadlocks because a future allocation attempt in this
3575 * transaction may attempt to lock a lower number AGF.
3576 *
3577 * We can't release the AGF until the transaction is committed, so at
3578 * this point we must update the "first allocation" tracker to point at
3579 * this AG if the tracker is empty or points to a lower AG. This allows
3580 * the next allocation attempt to be modified appropriately to avoid
3581 * deadlocks.
3582 */
3583 if (args->agbp &&
3584 (args->tp->t_highest_agno == NULLAGNUMBER ||
3585 args->agno > minimum_agno))
3586 args->tp->t_highest_agno = args->agno;
3587
3588 /*
3589 * If the allocation failed with an error or we had an ENOSPC result,
3590 * preserve the returned error whilst also marking the allocation result
3591 * as "no extent allocated". This ensures that callers that fail to
3592 * capture the error will still treat it as a failed allocation.
3593 */
3594 if (alloc_error || args->agbno == NULLAGBLOCK) {
3595 args->fsbno = NULLFSBLOCK;
3596 error = alloc_error;
3597 goto out_drop_perag;
3598 }
3599
3600 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3601
3602 ASSERT(args->len >= args->minlen);
3603 ASSERT(args->len <= args->maxlen);
3604 ASSERT(args->agbno % args->alignment == 0);
3605 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);
3606
3607 /* if not file data, insert new block into the reverse map btree */
3608 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
3609 error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
3610 args->agbno, args->len, &args->oinfo);
3611 if (error)
3612 goto out_drop_perag;
3613 }
3614
3615 if (!args->wasfromfl) {
3616 error = xfs_alloc_update_counters(args->tp, args->agbp,
3617 -((long)(args->len)));
3618 if (error)
3619 goto out_drop_perag;
3620
3621 ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
3622 args->len));
3623 }
3624
3625 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
3626
3627 XFS_STATS_INC(mp, xs_allocx);
3628 XFS_STATS_ADD(mp, xs_allocb, args->len);
3629
3630 trace_xfs_alloc_vextent_finish(args);
3631
3632 out_drop_perag:
3633 if (drop_perag && args->pag) {
3634 xfs_perag_rele(args->pag);
3635 args->pag = NULL;
3636 }
3637 return error;
3638 }
3639
3640 /*
3641 * Allocate within a single AG only. This uses a best-fit length algorithm so if
3642 * you need an exact-sized allocation without locality constraints, this is the
3643 * fastest way to do it.
3644 *
3645 * Caller is expected to hold a perag reference in args->pag.
3646 */
3647 int
3648 xfs_alloc_vextent_this_ag(
3649 struct xfs_alloc_arg *args,
3650 xfs_agnumber_t agno)
3651 {
3652 struct xfs_mount *mp = args->mp;
3653 xfs_agnumber_t minimum_agno;
3654 uint32_t alloc_flags = 0;
3655 int error;
3656
3657 ASSERT(args->pag != NULL);
3658 ASSERT(args->pag->pag_agno == agno);
3659
3660 args->agno = agno;
3661 args->agbno = 0;
3662
3663 trace_xfs_alloc_vextent_this_ag(args);
3664
3665 error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
3666 &minimum_agno);
3667 if (error) {
3668 if (error == -ENOSPC)
3669 return 0;
3670 return error;
3671 }
3672
3673 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3674 if (!error && args->agbp)
3675 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3676
3677 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3678 }
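/*
 * Example usage (an illustrative sketch, not an actual caller): allocate a
 * best-fit extent of exactly 16 blocks somewhere in AG @agno, with no rmap
 * owner update. The surrounding transaction setup is assumed to exist:
 *
 *	struct xfs_alloc_arg	args = {
 *		.tp		= tp,
 *		.mp		= mp,
 *		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
 *		.minlen		= 16,
 *		.maxlen		= 16,
 *		.prod		= 1,
 *		.resv		= XFS_AG_RESV_NONE,
 *	};
 *
 *	args.pag = xfs_perag_grab(mp, agno);
 *	if (!args.pag)
 *		return -EAGAIN;
 *	error = xfs_alloc_vextent_this_ag(&args, agno);
 *	xfs_perag_rele(args.pag);
 *	if (!error && args.fsbno == NULLFSBLOCK)
 *		error = -ENOSPC;	// see the XXX above about NULLFSBLOCK
 */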
3679
3680 /*
3681 * Iterate all AGs trying to allocate an extent starting from @start_agno.
3682 *
3683 * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
3684 * allocation attempts in @start_agno have locality information. If we fail to
3685 * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
3686 * we attempt to allocate in as there is no locality optimisation possible for
3687 * those allocations.
3688 *
3689 * On return, args->pag may be left referenced if we finish before the "all
3690 * failed" return point. The allocation finish still needs the perag, and
3691 * so the caller will release it once they've finished the allocation.
3692 *
3693 * When we wrap the AG iteration at the end of the filesystem, we have to be
3694 * careful not to wrap into AGs below ones we already have locked in the
3695 * transaction if we are doing a blocking iteration. This will result in an
3696 * out-of-order locking of AGFs and hence can cause deadlocks.
3697 */
3698 static int
3699 xfs_alloc_vextent_iterate_ags(
3700 struct xfs_alloc_arg *args,
3701 xfs_agnumber_t minimum_agno,
3702 xfs_agnumber_t start_agno,
3703 xfs_agblock_t target_agbno,
3704 uint32_t alloc_flags)
3705 {
3706 struct xfs_mount *mp = args->mp;
3707 xfs_agnumber_t restart_agno = minimum_agno;
3708 xfs_agnumber_t agno;
3709 int error = 0;
3710
3711 if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
3712 restart_agno = 0;
3713 restart:
3714 for_each_perag_wrap_range(mp, start_agno, restart_agno,
3715 mp->m_sb.sb_agcount, agno, args->pag) {
3716 args->agno = agno;
3717 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3718 if (error)
3719 break;
3720 if (!args->agbp) {
3721 trace_xfs_alloc_vextent_loopfailed(args);
3722 continue;
3723 }
3724
3725 /*
3726 * Allocation is supposed to succeed now, so break out of the
3727 * loop regardless of whether we succeed or not.
3728 */
3729 if (args->agno == start_agno && target_agbno) {
3730 args->agbno = target_agbno;
3731 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3732 } else {
3733 args->agbno = 0;
3734 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3735 }
3736 break;
3737 }
3738 if (error) {
3739 xfs_perag_rele(args->pag);
3740 args->pag = NULL;
3741 return error;
3742 }
3743 if (args->agbp)
3744 return 0;
3745
3746 /*
3747 * We didn't find an AG we can allocate from. If we were given
3748 * constraining flags by the caller, drop them and retry the allocation
3749 * without any constraints being set.
3750 */
3751 if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
3752 alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
3753 restart_agno = minimum_agno;
3754 goto restart;
3755 }
3756
3757 ASSERT(args->pag == NULL);
3758 trace_xfs_alloc_vextent_allfailed(args);
3759 return 0;
3760 }
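/*
 * Wrap illustration (assumed geometry): with sb_agcount == 8,
 * start_agno == 5 and minimum_agno == 3, the first (trylock) pass visits
 * AGs 5, 6, 7, 0, 1, 2, 3, 4 because restart_agno is forced to zero for
 * XFS_ALLOC_FLAG_TRYLOCK. If that pass fails, the blocking retry visits
 * only AGs 5, 6, 7, 3, 4 - it must not wrap below minimum_agno, as a
 * lower-numbered AGF may already be locked by this transaction.
 */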
3761
3762 /*
3763 * Iterate the AGs from the start AG to the end of the filesystem, trying
3764 * to allocate blocks. It starts with a near allocation attempt in the initial
3765 * AG, then falls back to anywhere-in-ag after the first AG fails. It will wrap
3766 * back to zero if allowed by previous allocations in this transaction,
3767 * otherwise will wrap back to the start AG and run a second blocking pass to
3768 * the end of the filesystem.
3769 */
3770 int
3771 xfs_alloc_vextent_start_ag(
3772 struct xfs_alloc_arg *args,
3773 xfs_fsblock_t target)
3774 {
3775 struct xfs_mount *mp = args->mp;
3776 xfs_agnumber_t minimum_agno;
3777 xfs_agnumber_t start_agno;
3778 xfs_agnumber_t rotorstep = xfs_rotorstep;
3779 bool bump_rotor = false;
3780 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3781 int error;
3782
3783 ASSERT(args->pag == NULL);
3784
3785 args->agno = NULLAGNUMBER;
3786 args->agbno = NULLAGBLOCK;
3787
3788 trace_xfs_alloc_vextent_start_ag(args);
3789
3790 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3791 if (error) {
3792 if (error == -ENOSPC)
3793 return 0;
3794 return error;
3795 }
3796
3797 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3798 xfs_is_inode32(mp)) {
3799 target = XFS_AGB_TO_FSB(mp,
3800 ((mp->m_agfrotor / rotorstep) %
3801 mp->m_sb.sb_agcount), 0);
3802 bump_rotor = true;
3803 }
3804
3805 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3806 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3807 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3808
3809 if (bump_rotor) {
3810 if (args->agno == start_agno)
3811 mp->m_agfrotor = (mp->m_agfrotor + 1) %
3812 (mp->m_sb.sb_agcount * rotorstep);
3813 else
3814 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3815 (mp->m_sb.sb_agcount * rotorstep);
3816 }
3817
3818 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3819 }
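/*
 * Rotor example (illustrative numbers): with sb_agcount == 4 and
 * xfs_rotorstep == 1, successive initial user-data allocations on an
 * inode32 filesystem retarget AG 0, 1, 2, 3, 0, ... With xfs_rotorstep == 2,
 * each AG is targeted twice before the rotor moves on, spreading new data
 * across the filesystem more slowly.
 */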
3820
3821 /*
3822 * Iterate from the agno indicated via @target through to the end of the
3823 * filesystem attempting blocking allocation. This does not wrap or try a second
3824 * pass, so it will not visit AGs lower than the one indicated by the target.
3825 */
3826 int
3827 xfs_alloc_vextent_first_ag(
3828 struct xfs_alloc_arg *args,
3829 xfs_fsblock_t target)
3830 {
3831 struct xfs_mount *mp = args->mp;
3832 xfs_agnumber_t minimum_agno;
3833 xfs_agnumber_t start_agno;
3834 uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
3835 int error;
3836
3837 ASSERT(args->pag == NULL);
3838
3839 args->agno = NULLAGNUMBER;
3840 args->agbno = NULLAGBLOCK;
3841
3842 trace_xfs_alloc_vextent_first_ag(args);
3843
3844 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3845 if (error) {
3846 if (error == -ENOSPC)
3847 return 0;
3848 return error;
3849 }
3850
3851 start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
3852 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3853 XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
3854 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3855 }
3856
3857 /*
3858 * Allocate at the exact block target or fail. Caller is expected to hold a
3859 * perag reference in args->pag.
3860 */
3861 int
3862 xfs_alloc_vextent_exact_bno(
3863 struct xfs_alloc_arg *args,
3864 xfs_fsblock_t target)
3865 {
3866 struct xfs_mount *mp = args->mp;
3867 xfs_agnumber_t minimum_agno;
3868 int error;
3869
3870 ASSERT(args->pag != NULL);
3871 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3872
3873 args->agno = XFS_FSB_TO_AGNO(mp, target);
3874 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3875
3876 trace_xfs_alloc_vextent_exact_bno(args);
3877
3878 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3879 if (error) {
3880 if (error == -ENOSPC)
3881 return 0;
3882 return error;
3883 }
3884
3885 error = xfs_alloc_vextent_prepare_ag(args, 0);
3886 if (!error && args->agbp)
3887 error = xfs_alloc_ag_vextent_exact(args);
3888
3889 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3890 }
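/*
 * Example usage (an illustrative sketch; next_fsb is a hypothetical caller
 * variable, with args set up as in the earlier example): try to extend an
 * existing extent by allocating the blocks immediately following it, or
 * fail:
 *
 *	args.pag = xfs_perag_grab(mp, XFS_FSB_TO_AGNO(mp, next_fsb));
 *	error = xfs_alloc_vextent_exact_bno(&args, next_fsb);
 *	xfs_perag_rele(args.pag);
 *
 * On success args.fsbno equals next_fsb, because
 * xfs_alloc_ag_vextent_exact() either allocates at exactly the requested
 * block or returns no extent (args.fsbno == NULLFSBLOCK).
 */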
3891
3892 /*
3893 * Allocate an extent as close to the target as possible. If there are no
3894 * viable candidates in the AG, then fail the allocation.
3895 *
3896 * Caller may or may not have a per-ag reference in args->pag.
3897 */
3898 int
3899 xfs_alloc_vextent_near_bno(
3900 struct xfs_alloc_arg *args,
3901 xfs_fsblock_t target)
3902 {
3903 struct xfs_mount *mp = args->mp;
3904 xfs_agnumber_t minimum_agno;
3905 bool needs_perag = args->pag == NULL;
3906 uint32_t alloc_flags = 0;
3907 int error;
3908
3909 if (!needs_perag)
3910 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3911
3912 args->agno = XFS_FSB_TO_AGNO(mp, target);
3913 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3914
3915 trace_xfs_alloc_vextent_near_bno(args);
3916
3917 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3918 if (error) {
3919 if (error == -ENOSPC)
3920 return 0;
3921 return error;
3922 }
3923
3924 if (needs_perag)
3925 args->pag = xfs_perag_grab(mp, args->agno);
3926
3927 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3928 if (!error && args->agbp)
3929 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3930
3931 return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
3932 }
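/*
 * Unlike xfs_alloc_vextent_exact_bno(), callers here may leave args->pag
 * NULL and let the function manage the perag reference itself (illustrative
 * sketch):
 *
 *	struct xfs_alloc_arg	args = {
 *		...			// args.pag deliberately left NULL
 *	};
 *
 *	error = xfs_alloc_vextent_near_bno(&args, target);
 *	// no xfs_perag_rele() needed; the reference was dropped internally
 */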
3933
3934 /* Ensure that the freelist is at full capacity. */
3935 int
3936 xfs_free_extent_fix_freelist(
3937 struct xfs_trans *tp,
3938 struct xfs_perag *pag,
3939 struct xfs_buf **agbp)
3940 {
3941 struct xfs_alloc_arg args;
3942 int error;
3943
3944 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3945 args.tp = tp;
3946 args.mp = tp->t_mountp;
3947 args.agno = pag->pag_agno;
3948 args.pag = pag;
3949
3950 /*
3951 * validate that the block number is legal - this enables us to detect
3952 * and handle a silent filesystem corruption rather than crashing.
3953 */
3954 if (args.agno >= args.mp->m_sb.sb_agcount)
3955 return -EFSCORRUPTED;
3956
3957 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3958 if (error)
3959 return error;
3960
3961 *agbp = args.agbp;
3962 return 0;
3963 }
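/*
 * Example usage (illustrative sketch): __xfs_free_extent() below is the
 * canonical caller - fix the freelist first, then modify the free space
 * btrees with the returned AGF buffer held locked in the transaction:
 *
 *	error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
 *	if (error)
 *		return error;
 *	error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len,
 *			oinfo, type);
 */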
3964
3965 /*
3966 * Free an extent.
3967 * Just break up the extent address and hand off to xfs_free_ag_extent
3968 * after fixing up the freelist.
3969 */
3970 int
3971 __xfs_free_extent(
3972 struct xfs_trans *tp,
3973 struct xfs_perag *pag,
3974 xfs_agblock_t agbno,
3975 xfs_extlen_t len,
3976 const struct xfs_owner_info *oinfo,
3977 enum xfs_ag_resv_type type,
3978 bool skip_discard)
3979 {
3980 struct xfs_mount *mp = tp->t_mountp;
3981 struct xfs_buf *agbp;
3982 struct xfs_agf *agf;
3983 int error;
3984 unsigned int busy_flags = 0;
3985
3986 ASSERT(len != 0);
3987 ASSERT(type != XFS_AG_RESV_AGFL);
3988
3989 if (XFS_TEST_ERROR(false, mp,
3990 XFS_ERRTAG_FREE_EXTENT))
3991 return -EIO;
3992
3993 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3994 if (error) {
3995 if (xfs_metadata_is_sick(error))
3996 xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
3997 return error;
3998 }
3999
4000 agf = agbp->b_addr;
4001
4002 if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
4003 xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
4004 error = -EFSCORRUPTED;
4005 goto err_release;
4006 }
4007
4008 /* validate the extent size is legal now that we have the AGF locked */
4009 if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
4010 xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT);
4011 error = -EFSCORRUPTED;
4012 goto err_release;
4013 }
4014
4015 error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo,
4016 type);
4017 if (error)
4018 goto err_release;
4019
4020 if (skip_discard)
4021 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
4022 xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
4023 return 0;
4024
4025 err_release:
4026 xfs_trans_brelse(tp, agbp);
4027 return error;
4028 }
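/*
 * Callers normally reach this through the wrappers in xfs_alloc.h; a minimal
 * sketch of freeing an extent (illustrative, assuming the xfs_free_extent()
 * wrapper, which passes skip_discard = false):
 *
 *	error = xfs_free_extent(tp, pag, XFS_FSB_TO_AGBNO(mp, fsbno), len,
 *			&XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_NONE);
 *
 * The freed range is also inserted into the busy extent tree, so it will not
 * be handed back out for user data until this transaction commits to the
 * log.
 */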
4029
4030 struct xfs_alloc_query_range_info {
4031 xfs_alloc_query_range_fn fn;
4032 void *priv;
4033 };
4034
4035 /* Format btree record and pass to our callback. */
4036 STATIC int
4037 xfs_alloc_query_range_helper(
4038 struct xfs_btree_cur *cur,
4039 const union xfs_btree_rec *rec,
4040 void *priv)
4041 {
4042 struct xfs_alloc_query_range_info *query = priv;
4043 struct xfs_alloc_rec_incore irec;
4044 xfs_failaddr_t fa;
4045
4046 xfs_alloc_btrec_to_irec(rec, &irec);
4047 fa = xfs_alloc_check_irec(cur->bc_ag.pag, &irec);
4048 if (fa)
4049 return xfs_alloc_complain_bad_rec(cur, fa, &irec);
4050
4051 return query->fn(cur, &irec, query->priv);
4052 }
4053
4054 /* Find all free space within a given range of blocks. */
4055 int
4056 xfs_alloc_query_range(
4057 struct xfs_btree_cur *cur,
4058 const struct xfs_alloc_rec_incore *low_rec,
4059 const struct xfs_alloc_rec_incore *high_rec,
4060 xfs_alloc_query_range_fn fn,
4061 void *priv)
4062 {
4063 union xfs_btree_irec low_brec = { .a = *low_rec };
4064 union xfs_btree_irec high_brec = { .a = *high_rec };
4065 struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn };
4066
4067 ASSERT(xfs_btree_is_bno(cur->bc_ops));
4068 return xfs_btree_query_range(cur, &low_brec, &high_brec,
4069 xfs_alloc_query_range_helper, &query);
4070 }
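/*
 * Example callback (illustrative sketch; the names are hypothetical): tally
 * the free extents and free blocks within the queried range.
 *
 *	struct freesp_tally {
 *		uint64_t	extents;
 *		uint64_t	blocks;
 *	};
 *
 *	static int
 *	freesp_tally_fn(
 *		struct xfs_btree_cur			*cur,
 *		const struct xfs_alloc_rec_incore	*rec,
 *		void					*priv)
 *	{
 *		struct freesp_tally	*tally = priv;
 *
 *		tally->extents++;
 *		tally->blocks += rec->ar_blockcount;
 *		return 0;
 *	}
 *
 * Pass it with @priv pointing at a zeroed struct freesp_tally, and with
 * low_rec/high_rec having only ar_startblock set to bound the query.
 */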
4071
4072 /* Find all free space records. */
4073 int
4074 xfs_alloc_query_all(
4075 struct xfs_btree_cur *cur,
4076 xfs_alloc_query_range_fn fn,
4077 void *priv)
4078 {
4079 struct xfs_alloc_query_range_info query;
4080
4081 ASSERT(xfs_btree_is_bno(cur->bc_ops));
4082 query.priv = priv;
4083 query.fn = fn;
4084 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
4085 }
4086
4087 /*
4088 * Scan part of the keyspace of the free space and tell us if the area has no
4089 * records, is fully mapped by records, or is partially filled.
4090 */
4091 int
4092 xfs_alloc_has_records(
4093 struct xfs_btree_cur *cur,
4094 xfs_agblock_t bno,
4095 xfs_extlen_t len,
4096 enum xbtree_recpacking *outcome)
4097 {
4098 union xfs_btree_irec low;
4099 union xfs_btree_irec high;
4100
4101 memset(&low, 0, sizeof(low));
4102 low.a.ar_startblock = bno;
4103 memset(&high, 0xFF, sizeof(high));
4104 high.a.ar_startblock = bno + len - 1;
4105
4106 return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
4107 }
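/*
 * Worked example (assumed bnobt contents): if the only free extent in the
 * AG is [64, 80), then a query of bno == 64, len == 16 returns
 * XBTREE_RECPACKING_FULL; bno == 80, len == 8 returns
 * XBTREE_RECPACKING_EMPTY; and bno == 72, len == 16 returns
 * XBTREE_RECPACKING_SPARSE.
 */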
4108
4109 /*
4110 * Walk all the blocks in the AGFL. The @walk_fn can return any negative
4111 * error code or XFS_ITER_*.
4112 */
4113 int
4114 xfs_agfl_walk(
4115 struct xfs_mount *mp,
4116 struct xfs_agf *agf,
4117 struct xfs_buf *agflbp,
4118 xfs_agfl_walk_fn walk_fn,
4119 void *priv)
4120 {
4121 __be32 *agfl_bno;
4122 unsigned int i;
4123 int error;
4124
4125 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
4126 i = be32_to_cpu(agf->agf_flfirst);
4127
4128 /* Nothing to walk in an empty AGFL. */
4129 if (agf->agf_flcount == cpu_to_be32(0))
4130 return 0;
4131
4132 /* Otherwise, walk from first to last, wrapping as needed. */
4133 for (;;) {
4134 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
4135 if (error)
4136 return error;
4137 if (i == be32_to_cpu(agf->agf_fllast))
4138 break;
4139 if (++i == xfs_agfl_size(mp))
4140 i = 0;
4141 }
4142
4143 return 0;
4144 }
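/*
 * Example @walk_fn (illustrative sketch; the names are hypothetical): count
 * the AGFL entries, sanity checking each block number against the AG size:
 *
 *	static int
 *	agfl_count_fn(
 *		struct xfs_mount	*mp,
 *		xfs_agblock_t		bno,
 *		void			*priv)
 *	{
 *		unsigned int		*count = priv;
 *
 *		if (bno >= mp->m_sb.sb_agblocks)
 *			return -EFSCORRUPTED;
 *		(*count)++;
 *		return 0;
 *	}
 *
 * Returning a nonzero value stops the walk and is passed back to the
 * xfs_agfl_walk() caller.
 */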
4145
4146 int __init
4147 xfs_extfree_intent_init_cache(void)
4148 {
4149 xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
4150 sizeof(struct xfs_extent_free_item),
4151 0, 0, NULL);
4152
4153 return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
4154 }
4155
4156 void
4157 xfs_extfree_intent_destroy_cache(void)
4158 {
4159 kmem_cache_destroy(xfs_extfree_item_cache);
4160 xfs_extfree_item_cache = NULL;
4161 }
4162