// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"

extern kmem_zone_t	*xfs_bmap_free_item_zone;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

/*
 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in
 * the beginning of the block for a proper header with the location information
 * and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

/*
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * allocated for a delayed extent, for example, a data block in a certain AG is
 * allocated but there is no additional block for the additional bmap btree
 * block due to a split of the bmap btree of the file. The result of this may
 * lead to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated. To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
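/*
 * Worked example (illustrative numbers, not from the source): with
 * XFS_ALLOC_AGFL_RESERVE == 4, a filesystem with sb_agcount == 4 sets
 * aside 4 * (4 + 4) = 32 blocks that delayed allocation may never
 * reserve, matching the "4 fsbs for the freelist plus 4 for a bmap
 * btree split" budget described above.
 */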
/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;		/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;		/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
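/*
 * Worked example (illustrative numbers): on a 4k-block filesystem whose
 * four sector-sized AG headers fit in a single fsb, with finobt, rmapbt
 * and reflink all enabled, the deduction is 1 (headers) + 4 (AGFL
 * reserve) + 3 (AGF/AGI btree roots) + 3 (finobt, rmap, refcount roots)
 * = 11 blocks off sb_agblocks.
 */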
/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_agnumber_t		agno = cur->bc_private.a.agno;
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;

	*bno = be32_to_cpu(rec->alloc.ar_startblock);
	*len = be32_to_cpu(rec->alloc.ar_blockcount);

	if (*len == 0)
		goto out_bad_rec;

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbno(mp, agno, *bno))
		goto out_bad_rec;
	if (*bno > *bno + *len)
		goto out_bad_rec;
	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
		goto out_bad_rec;

	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", *bno, *len);
	return -EFSCORRUPTED;
}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
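/*
 * Worked example (illustrative numbers): with alignment == 4, a trimmed
 * extent [bno = 7, len = 20] rounds up to aligned_bno = 8, diff = 1, so
 * the usable aligned result is [8, 19]. If diff consumed the whole
 * extent, *reslen would be clamped to 0.
 */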
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = xfs_alloc_is_userdata(datatype);

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
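/*
 * Worked example (illustrative numbers): with prod = 4 and mod = 1, a
 * candidate rlen of 10 gives k = 10 % 4 = 2; since k > mod, rlen is
 * trimmed to 10 - (2 - 1) = 9, and 9 % 4 == 1 == mod as required. Had
 * k been smaller than mod, rlen would drop by prod - (mod - k) instead.
 */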
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp,
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(mp,
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
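	/*
	 * Worked example of the middle-cut case (illustrative numbers):
	 * allocating [rbno = 120, rlen = 10] out of free record
	 * [fbno = 100, flen = 50] leaves two new free extents,
	 * nfbno1/nflen1 = [100, 20] and nfbno2/nflen2 = [130, 20].
	 */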
506 */ 507 if (nfbno1 != NULLAGBLOCK) { 508 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) 509 return error; 510 XFS_WANT_CORRUPTED_RETURN(mp, i == 0); 511 if ((error = xfs_btree_insert(cnt_cur, &i))) 512 return error; 513 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 514 } 515 if (nfbno2 != NULLAGBLOCK) { 516 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) 517 return error; 518 XFS_WANT_CORRUPTED_RETURN(mp, i == 0); 519 if ((error = xfs_btree_insert(cnt_cur, &i))) 520 return error; 521 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 522 } 523 /* 524 * Fix up the by-block btree entry(s). 525 */ 526 if (nfbno1 == NULLAGBLOCK) { 527 /* 528 * No remaining freespace, just delete the by-block tree entry. 529 */ 530 if ((error = xfs_btree_delete(bno_cur, &i))) 531 return error; 532 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 533 } else { 534 /* 535 * Update the by-block entry to start later|be shorter. 536 */ 537 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1))) 538 return error; 539 } 540 if (nfbno2 != NULLAGBLOCK) { 541 /* 542 * 2 resulting free entries, need to add one. 543 */ 544 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) 545 return error; 546 XFS_WANT_CORRUPTED_RETURN(mp, i == 0); 547 if ((error = xfs_btree_insert(bno_cur, &i))) 548 return error; 549 XFS_WANT_CORRUPTED_RETURN(mp, i == 1); 550 } 551 return 0; 552 } 553 554 static xfs_failaddr_t 555 xfs_agfl_verify( 556 struct xfs_buf *bp) 557 { 558 struct xfs_mount *mp = bp->b_target->bt_mount; 559 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp); 560 int i; 561 562 /* 563 * There is no verification of non-crc AGFLs because mkfs does not 564 * initialise the AGFL to zero or NULL. Hence the only valid part of the 565 * AGFL is what the AGF says is active. We can't get to the AGF, so we 566 * can't verify just those entries are valid. 567 */ 568 if (!xfs_sb_version_hascrc(&mp->m_sb)) 569 return NULL; 570 571 if (!xfs_verify_magic(bp, agfl->agfl_magicnum)) 572 return __this_address; 573 if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid)) 574 return __this_address; 575 /* 576 * during growfs operations, the perag is not fully initialised, 577 * so we can't use it for any useful checking. growfs ensures we can't 578 * use it by using uncached buffers that don't have the perag attached 579 * so we can detect and avoid this problem. 580 */ 581 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno) 582 return __this_address; 583 584 for (i = 0; i < xfs_agfl_size(mp); i++) { 585 if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK && 586 be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks) 587 return __this_address; 588 } 589 590 if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn))) 591 return __this_address; 592 return NULL; 593 } 594 595 static void 596 xfs_agfl_read_verify( 597 struct xfs_buf *bp) 598 { 599 struct xfs_mount *mp = bp->b_target->bt_mount; 600 xfs_failaddr_t fa; 601 602 /* 603 * There is no verification of non-crc AGFLs because mkfs does not 604 * initialise the AGFL to zero or NULL. Hence the only valid part of the 605 * AGFL is what the AGF says is active. We can't get to the AGF, so we 606 * can't verify just those entries are valid. 
607 */ 608 if (!xfs_sb_version_hascrc(&mp->m_sb)) 609 return; 610 611 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) 612 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 613 else { 614 fa = xfs_agfl_verify(bp); 615 if (fa) 616 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 617 } 618 } 619 620 static void 621 xfs_agfl_write_verify( 622 struct xfs_buf *bp) 623 { 624 struct xfs_mount *mp = bp->b_target->bt_mount; 625 struct xfs_buf_log_item *bip = bp->b_log_item; 626 xfs_failaddr_t fa; 627 628 /* no verification of non-crc AGFLs */ 629 if (!xfs_sb_version_hascrc(&mp->m_sb)) 630 return; 631 632 fa = xfs_agfl_verify(bp); 633 if (fa) { 634 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 635 return; 636 } 637 638 if (bip) 639 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn); 640 641 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF); 642 } 643 644 const struct xfs_buf_ops xfs_agfl_buf_ops = { 645 .name = "xfs_agfl", 646 .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) }, 647 .verify_read = xfs_agfl_read_verify, 648 .verify_write = xfs_agfl_write_verify, 649 .verify_struct = xfs_agfl_verify, 650 }; 651 652 /* 653 * Read in the allocation group free block array. 654 */ 655 int /* error */ 656 xfs_alloc_read_agfl( 657 xfs_mount_t *mp, /* mount point structure */ 658 xfs_trans_t *tp, /* transaction pointer */ 659 xfs_agnumber_t agno, /* allocation group number */ 660 xfs_buf_t **bpp) /* buffer for the ag free block array */ 661 { 662 xfs_buf_t *bp; /* return value */ 663 int error; 664 665 ASSERT(agno != NULLAGNUMBER); 666 error = xfs_trans_read_buf( 667 mp, tp, mp->m_ddev_targp, 668 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)), 669 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops); 670 if (error) 671 return error; 672 xfs_buf_set_ref(bp, XFS_AGFL_REF); 673 *bpp = bp; 674 return 0; 675 } 676 677 STATIC int 678 xfs_alloc_update_counters( 679 struct xfs_trans *tp, 680 struct xfs_perag *pag, 681 struct xfs_buf *agbp, 682 long len) 683 { 684 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); 685 686 pag->pagf_freeblks += len; 687 be32_add_cpu(&agf->agf_freeblks, len); 688 689 xfs_trans_agblocks_delta(tp, len); 690 if (unlikely(be32_to_cpu(agf->agf_freeblks) > 691 be32_to_cpu(agf->agf_length))) 692 return -EFSCORRUPTED; 693 694 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); 695 return 0; 696 } 697 698 /* 699 * Allocation group level functions. 700 */ 701 702 /* 703 * Allocate a variable extent in the allocation group agno. 704 * Type and bno are used to determine where in the allocation group the 705 * extent will start. 706 * Extent's length (returned in *len) will be between minlen and maxlen, 707 * and of the form k * prod + mod unless there's nothing that large. 708 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 709 */ 710 STATIC int /* error */ 711 xfs_alloc_ag_vextent( 712 xfs_alloc_arg_t *args) /* argument structure for allocation */ 713 { 714 int error=0; 715 716 ASSERT(args->minlen > 0); 717 ASSERT(args->maxlen > 0); 718 ASSERT(args->minlen <= args->maxlen); 719 ASSERT(args->mod < args->prod); 720 ASSERT(args->alignment > 0); 721 722 /* 723 * Branch to correct routine based on the type. 
724 */ 725 args->wasfromfl = 0; 726 switch (args->type) { 727 case XFS_ALLOCTYPE_THIS_AG: 728 error = xfs_alloc_ag_vextent_size(args); 729 break; 730 case XFS_ALLOCTYPE_NEAR_BNO: 731 error = xfs_alloc_ag_vextent_near(args); 732 break; 733 case XFS_ALLOCTYPE_THIS_BNO: 734 error = xfs_alloc_ag_vextent_exact(args); 735 break; 736 default: 737 ASSERT(0); 738 /* NOTREACHED */ 739 } 740 741 if (error || args->agbno == NULLAGBLOCK) 742 return error; 743 744 ASSERT(args->len >= args->minlen); 745 ASSERT(args->len <= args->maxlen); 746 ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL); 747 ASSERT(args->agbno % args->alignment == 0); 748 749 /* if not file data, insert new block into the reverse map btree */ 750 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) { 751 error = xfs_rmap_alloc(args->tp, args->agbp, args->agno, 752 args->agbno, args->len, &args->oinfo); 753 if (error) 754 return error; 755 } 756 757 if (!args->wasfromfl) { 758 error = xfs_alloc_update_counters(args->tp, args->pag, 759 args->agbp, 760 -((long)(args->len))); 761 if (error) 762 return error; 763 764 ASSERT(!xfs_extent_busy_search(args->mp, args->agno, 765 args->agbno, args->len)); 766 } 767 768 xfs_ag_resv_alloc_extent(args->pag, args->resv, args); 769 770 XFS_STATS_INC(args->mp, xs_allocx); 771 XFS_STATS_ADD(args->mp, xs_allocb, args->len); 772 return error; 773 } 774 775 /* 776 * Allocate a variable extent at exactly agno/bno. 777 * Extent's length (returned in *len) will be between minlen and maxlen, 778 * and of the form k * prod + mod unless there's nothing that large. 779 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it. 780 */ 781 STATIC int /* error */ 782 xfs_alloc_ag_vextent_exact( 783 xfs_alloc_arg_t *args) /* allocation argument structure */ 784 { 785 xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */ 786 xfs_btree_cur_t *cnt_cur;/* by count btree cursor */ 787 int error; 788 xfs_agblock_t fbno; /* start block of found extent */ 789 xfs_extlen_t flen; /* length of found extent */ 790 xfs_agblock_t tbno; /* start block of busy extent */ 791 xfs_extlen_t tlen; /* length of busy extent */ 792 xfs_agblock_t tend; /* end block of busy extent */ 793 int i; /* success/failure of operation */ 794 unsigned busy_gen; 795 796 ASSERT(args->alignment == 1); 797 798 /* 799 * Allocate/initialize a cursor for the by-number freespace btree. 800 */ 801 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 802 args->agno, XFS_BTNUM_BNO); 803 804 /* 805 * Lookup bno and minlen in the btree (minlen is irrelevant, really). 806 * Look for the closest free block <= bno, it must contain bno 807 * if any free block does. 808 */ 809 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i); 810 if (error) 811 goto error0; 812 if (!i) 813 goto not_found; 814 815 /* 816 * Grab the freespace record. 817 */ 818 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i); 819 if (error) 820 goto error0; 821 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 822 ASSERT(fbno <= args->agbno); 823 824 /* 825 * Check for overlapping busy extents. 826 */ 827 tbno = fbno; 828 tlen = flen; 829 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen); 830 831 /* 832 * Give up if the start of the extent is busy, or the freespace isn't 833 * long enough for the minimum request. 
834 */ 835 if (tbno > args->agbno) 836 goto not_found; 837 if (tlen < args->minlen) 838 goto not_found; 839 tend = tbno + tlen; 840 if (tend < args->agbno + args->minlen) 841 goto not_found; 842 843 /* 844 * End of extent will be smaller of the freespace end and the 845 * maximal requested end. 846 * 847 * Fix the length according to mod and prod if given. 848 */ 849 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen) 850 - args->agbno; 851 xfs_alloc_fix_len(args); 852 ASSERT(args->agbno + args->len <= tend); 853 854 /* 855 * We are allocating agbno for args->len 856 * Allocate/initialize a cursor for the by-size btree. 857 */ 858 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 859 args->agno, XFS_BTNUM_CNT); 860 ASSERT(args->agbno + args->len <= 861 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 862 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno, 863 args->len, XFSA_FIXUP_BNO_OK); 864 if (error) { 865 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 866 goto error0; 867 } 868 869 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 870 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 871 872 args->wasfromfl = 0; 873 trace_xfs_alloc_exact_done(args); 874 return 0; 875 876 not_found: 877 /* Didn't find it, return null. */ 878 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 879 args->agbno = NULLAGBLOCK; 880 trace_xfs_alloc_exact_notfound(args); 881 return 0; 882 883 error0: 884 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 885 trace_xfs_alloc_exact_error(args); 886 return error; 887 } 888 889 /* 890 * Search the btree in a given direction via the search cursor and compare 891 * the records found against the good extent we've already found. 892 */ 893 STATIC int 894 xfs_alloc_find_best_extent( 895 struct xfs_alloc_arg *args, /* allocation argument structure */ 896 struct xfs_btree_cur **gcur, /* good cursor */ 897 struct xfs_btree_cur **scur, /* searching cursor */ 898 xfs_agblock_t gdiff, /* difference for search comparison */ 899 xfs_agblock_t *sbno, /* extent found by search */ 900 xfs_extlen_t *slen, /* extent length */ 901 xfs_agblock_t *sbnoa, /* aligned extent found by search */ 902 xfs_extlen_t *slena, /* aligned extent length */ 903 int dir) /* 0 = search right, 1 = search left */ 904 { 905 xfs_agblock_t new; 906 xfs_agblock_t sdiff; 907 int error; 908 int i; 909 unsigned busy_gen; 910 911 /* The good extent is perfect, no need to search. */ 912 if (!gdiff) 913 goto out_use_good; 914 915 /* 916 * Look until we find a better one, run out of space or run off the end. 917 */ 918 do { 919 error = xfs_alloc_get_rec(*scur, sbno, slen, &i); 920 if (error) 921 goto error0; 922 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 923 xfs_alloc_compute_aligned(args, *sbno, *slen, 924 sbnoa, slena, &busy_gen); 925 926 /* 927 * The good extent is closer than this one. 928 */ 929 if (!dir) { 930 if (*sbnoa > args->max_agbno) 931 goto out_use_good; 932 if (*sbnoa >= args->agbno + gdiff) 933 goto out_use_good; 934 } else { 935 if (*sbnoa < args->min_agbno) 936 goto out_use_good; 937 if (*sbnoa <= args->agbno - gdiff) 938 goto out_use_good; 939 } 940 941 /* 942 * Same distance, compare length and pick the best. 943 */ 944 if (*slena >= args->minlen) { 945 args->len = XFS_EXTLEN_MIN(*slena, args->maxlen); 946 xfs_alloc_fix_len(args); 947 948 sdiff = xfs_alloc_compute_diff(args->agbno, args->len, 949 args->alignment, 950 args->datatype, *sbnoa, 951 *slena, &new); 952 953 /* 954 * Choose closer size and invalidate other cursor. 
955 */ 956 if (sdiff < gdiff) 957 goto out_use_search; 958 goto out_use_good; 959 } 960 961 if (!dir) 962 error = xfs_btree_increment(*scur, 0, &i); 963 else 964 error = xfs_btree_decrement(*scur, 0, &i); 965 if (error) 966 goto error0; 967 } while (i); 968 969 out_use_good: 970 xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR); 971 *scur = NULL; 972 return 0; 973 974 out_use_search: 975 xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR); 976 *gcur = NULL; 977 return 0; 978 979 error0: 980 /* caller invalidates cursors */ 981 return error; 982 } 983 984 /* 985 * Allocate a variable extent near bno in the allocation group agno. 986 * Extent's length (returned in len) will be between minlen and maxlen, 987 * and of the form k * prod + mod unless there's nothing that large. 988 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 989 */ 990 STATIC int /* error */ 991 xfs_alloc_ag_vextent_near( 992 xfs_alloc_arg_t *args) /* allocation argument structure */ 993 { 994 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */ 995 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */ 996 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */ 997 xfs_agblock_t gtbno; /* start bno of right side entry */ 998 xfs_agblock_t gtbnoa; /* aligned ... */ 999 xfs_extlen_t gtdiff; /* difference to right side entry */ 1000 xfs_extlen_t gtlen; /* length of right side entry */ 1001 xfs_extlen_t gtlena; /* aligned ... */ 1002 xfs_agblock_t gtnew; /* useful start bno of right side */ 1003 int error; /* error code */ 1004 int i; /* result code, temporary */ 1005 int j; /* result code, temporary */ 1006 xfs_agblock_t ltbno; /* start bno of left side entry */ 1007 xfs_agblock_t ltbnoa; /* aligned ... */ 1008 xfs_extlen_t ltdiff; /* difference to left side entry */ 1009 xfs_extlen_t ltlen; /* length of left side entry */ 1010 xfs_extlen_t ltlena; /* aligned ... */ 1011 xfs_agblock_t ltnew; /* useful start bno of left side */ 1012 xfs_extlen_t rlen; /* length of returned extent */ 1013 bool busy; 1014 unsigned busy_gen; 1015 #ifdef DEBUG 1016 /* 1017 * Randomly don't execute the first algorithm. 1018 */ 1019 int dofirst; /* set to do first algorithm */ 1020 1021 dofirst = prandom_u32() & 1; 1022 #endif 1023 1024 /* handle unitialized agbno range so caller doesn't have to */ 1025 if (!args->min_agbno && !args->max_agbno) 1026 args->max_agbno = args->mp->m_sb.sb_agblocks - 1; 1027 ASSERT(args->min_agbno <= args->max_agbno); 1028 1029 /* clamp agbno to the range if it's outside */ 1030 if (args->agbno < args->min_agbno) 1031 args->agbno = args->min_agbno; 1032 if (args->agbno > args->max_agbno) 1033 args->agbno = args->max_agbno; 1034 1035 restart: 1036 bno_cur_lt = NULL; 1037 bno_cur_gt = NULL; 1038 ltlen = 0; 1039 gtlena = 0; 1040 ltlena = 0; 1041 busy = false; 1042 1043 /* 1044 * Get a cursor for the by-size btree. 1045 */ 1046 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 1047 args->agno, XFS_BTNUM_CNT); 1048 1049 /* 1050 * See if there are any free extents as big as maxlen. 1051 */ 1052 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i))) 1053 goto error0; 1054 /* 1055 * If none, then pick up the last entry in the tree unless the 1056 * tree is empty. 
1057 */ 1058 if (!i) { 1059 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, <bno, 1060 <len, &i))) 1061 goto error0; 1062 if (i == 0 || ltlen == 0) { 1063 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1064 trace_xfs_alloc_near_noentry(args); 1065 return 0; 1066 } 1067 ASSERT(i == 1); 1068 } 1069 args->wasfromfl = 0; 1070 1071 /* 1072 * First algorithm. 1073 * If the requested extent is large wrt the freespaces available 1074 * in this a.g., then the cursor will be pointing to a btree entry 1075 * near the right edge of the tree. If it's in the last btree leaf 1076 * block, then we just examine all the entries in that block 1077 * that are big enough, and pick the best one. 1078 * This is written as a while loop so we can break out of it, 1079 * but we never loop back to the top. 1080 */ 1081 while (xfs_btree_islastblock(cnt_cur, 0)) { 1082 xfs_extlen_t bdiff; 1083 int besti=0; 1084 xfs_extlen_t blen=0; 1085 xfs_agblock_t bnew=0; 1086 1087 #ifdef DEBUG 1088 if (dofirst) 1089 break; 1090 #endif 1091 /* 1092 * Start from the entry that lookup found, sequence through 1093 * all larger free blocks. If we're actually pointing at a 1094 * record smaller than maxlen, go to the start of this block, 1095 * and skip all those smaller than minlen. 1096 */ 1097 if (ltlen || args->alignment > 1) { 1098 cnt_cur->bc_ptrs[0] = 1; 1099 do { 1100 if ((error = xfs_alloc_get_rec(cnt_cur, <bno, 1101 <len, &i))) 1102 goto error0; 1103 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1104 if (ltlen >= args->minlen) 1105 break; 1106 if ((error = xfs_btree_increment(cnt_cur, 0, &i))) 1107 goto error0; 1108 } while (i); 1109 ASSERT(ltlen >= args->minlen); 1110 if (!i) 1111 break; 1112 } 1113 i = cnt_cur->bc_ptrs[0]; 1114 for (j = 1, blen = 0, bdiff = 0; 1115 !error && j && (blen < args->maxlen || bdiff > 0); 1116 error = xfs_btree_increment(cnt_cur, 0, &j)) { 1117 /* 1118 * For each entry, decide if it's better than 1119 * the previous best entry. 1120 */ 1121 if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) 1122 goto error0; 1123 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1124 busy = xfs_alloc_compute_aligned(args, ltbno, ltlen, 1125 <bnoa, <lena, &busy_gen); 1126 if (ltlena < args->minlen) 1127 continue; 1128 if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno) 1129 continue; 1130 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1131 xfs_alloc_fix_len(args); 1132 ASSERT(args->len >= args->minlen); 1133 if (args->len < blen) 1134 continue; 1135 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, 1136 args->alignment, args->datatype, ltbnoa, 1137 ltlena, <new); 1138 if (ltnew != NULLAGBLOCK && 1139 (args->len > blen || ltdiff < bdiff)) { 1140 bdiff = ltdiff; 1141 bnew = ltnew; 1142 blen = args->len; 1143 besti = cnt_cur->bc_ptrs[0]; 1144 } 1145 } 1146 /* 1147 * It didn't work. We COULD be in a case where 1148 * there's a good record somewhere, so try again. 1149 */ 1150 if (blen == 0) 1151 break; 1152 /* 1153 * Point at the best entry, and retrieve it again. 1154 */ 1155 cnt_cur->bc_ptrs[0] = besti; 1156 if ((error = xfs_alloc_get_rec(cnt_cur, <bno, <len, &i))) 1157 goto error0; 1158 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1159 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 1160 args->len = blen; 1161 1162 /* 1163 * We are allocating starting at bnew for blen blocks. 1164 */ 1165 args->agbno = bnew; 1166 ASSERT(bnew >= ltbno); 1167 ASSERT(bnew + blen <= ltbno + ltlen); 1168 /* 1169 * Set up a cursor for the by-bno tree. 
1170 */ 1171 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, 1172 args->agbp, args->agno, XFS_BTNUM_BNO); 1173 /* 1174 * Fix up the btree entries. 1175 */ 1176 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, 1177 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK))) 1178 goto error0; 1179 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1180 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1181 1182 trace_xfs_alloc_near_first(args); 1183 return 0; 1184 } 1185 /* 1186 * Second algorithm. 1187 * Search in the by-bno tree to the left and to the right 1188 * simultaneously, until in each case we find a space big enough, 1189 * or run into the edge of the tree. When we run into the edge, 1190 * we deallocate that cursor. 1191 * If both searches succeed, we compare the two spaces and pick 1192 * the better one. 1193 * With alignment, it's possible for both to fail; the upper 1194 * level algorithm that picks allocation groups for allocations 1195 * is not supposed to do this. 1196 */ 1197 /* 1198 * Allocate and initialize the cursor for the leftward search. 1199 */ 1200 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 1201 args->agno, XFS_BTNUM_BNO); 1202 /* 1203 * Lookup <= bno to find the leftward search's starting point. 1204 */ 1205 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i))) 1206 goto error0; 1207 if (!i) { 1208 /* 1209 * Didn't find anything; use this cursor for the rightward 1210 * search. 1211 */ 1212 bno_cur_gt = bno_cur_lt; 1213 bno_cur_lt = NULL; 1214 } 1215 /* 1216 * Found something. Duplicate the cursor for the rightward search. 1217 */ 1218 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt))) 1219 goto error0; 1220 /* 1221 * Increment the cursor, so we will point at the entry just right 1222 * of the leftward entry if any, or to the leftmost entry. 1223 */ 1224 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) 1225 goto error0; 1226 if (!i) { 1227 /* 1228 * It failed, there are no rightward entries. 1229 */ 1230 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR); 1231 bno_cur_gt = NULL; 1232 } 1233 /* 1234 * Loop going left with the leftward cursor, right with the 1235 * rightward cursor, until either both directions give up or 1236 * we find an entry at least as big as minlen. 1237 */ 1238 do { 1239 if (bno_cur_lt) { 1240 if ((error = xfs_alloc_get_rec(bno_cur_lt, <bno, <len, &i))) 1241 goto error0; 1242 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1243 busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen, 1244 <bnoa, <lena, &busy_gen); 1245 if (ltlena >= args->minlen && ltbnoa >= args->min_agbno) 1246 break; 1247 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i))) 1248 goto error0; 1249 if (!i || ltbnoa < args->min_agbno) { 1250 xfs_btree_del_cursor(bno_cur_lt, 1251 XFS_BTREE_NOERROR); 1252 bno_cur_lt = NULL; 1253 } 1254 } 1255 if (bno_cur_gt) { 1256 if ((error = xfs_alloc_get_rec(bno_cur_gt, >bno, >len, &i))) 1257 goto error0; 1258 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1259 busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen, 1260 >bnoa, >lena, &busy_gen); 1261 if (gtlena >= args->minlen && gtbnoa <= args->max_agbno) 1262 break; 1263 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) 1264 goto error0; 1265 if (!i || gtbnoa > args->max_agbno) { 1266 xfs_btree_del_cursor(bno_cur_gt, 1267 XFS_BTREE_NOERROR); 1268 bno_cur_gt = NULL; 1269 } 1270 } 1271 } while (bno_cur_lt || bno_cur_gt); 1272 1273 /* 1274 * Got both cursors still active, need to find better entry. 
1275 */ 1276 if (bno_cur_lt && bno_cur_gt) { 1277 if (ltlena >= args->minlen) { 1278 /* 1279 * Left side is good, look for a right side entry. 1280 */ 1281 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1282 xfs_alloc_fix_len(args); 1283 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, 1284 args->alignment, args->datatype, ltbnoa, 1285 ltlena, <new); 1286 1287 error = xfs_alloc_find_best_extent(args, 1288 &bno_cur_lt, &bno_cur_gt, 1289 ltdiff, >bno, >len, 1290 >bnoa, >lena, 1291 0 /* search right */); 1292 } else { 1293 ASSERT(gtlena >= args->minlen); 1294 1295 /* 1296 * Right side is good, look for a left side entry. 1297 */ 1298 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); 1299 xfs_alloc_fix_len(args); 1300 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len, 1301 args->alignment, args->datatype, gtbnoa, 1302 gtlena, >new); 1303 1304 error = xfs_alloc_find_best_extent(args, 1305 &bno_cur_gt, &bno_cur_lt, 1306 gtdiff, <bno, <len, 1307 <bnoa, <lena, 1308 1 /* search left */); 1309 } 1310 1311 if (error) 1312 goto error0; 1313 } 1314 1315 /* 1316 * If we couldn't get anything, give up. 1317 */ 1318 if (bno_cur_lt == NULL && bno_cur_gt == NULL) { 1319 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1320 1321 if (busy) { 1322 trace_xfs_alloc_near_busy(args); 1323 xfs_extent_busy_flush(args->mp, args->pag, busy_gen); 1324 goto restart; 1325 } 1326 trace_xfs_alloc_size_neither(args); 1327 args->agbno = NULLAGBLOCK; 1328 return 0; 1329 } 1330 1331 /* 1332 * At this point we have selected a freespace entry, either to the 1333 * left or to the right. If it's on the right, copy all the 1334 * useful variables to the "left" set so we only have one 1335 * copy of this code. 1336 */ 1337 if (bno_cur_gt) { 1338 bno_cur_lt = bno_cur_gt; 1339 bno_cur_gt = NULL; 1340 ltbno = gtbno; 1341 ltbnoa = gtbnoa; 1342 ltlen = gtlen; 1343 ltlena = gtlena; 1344 j = 1; 1345 } else 1346 j = 0; 1347 1348 /* 1349 * Fix up the length and compute the useful address. 1350 */ 1351 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1352 xfs_alloc_fix_len(args); 1353 rlen = args->len; 1354 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, 1355 args->datatype, ltbnoa, ltlena, <new); 1356 ASSERT(ltnew >= ltbno); 1357 ASSERT(ltnew + rlen <= ltbnoa + ltlena); 1358 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 1359 ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno); 1360 args->agbno = ltnew; 1361 1362 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, 1363 ltnew, rlen, XFSA_FIXUP_BNO_OK))) 1364 goto error0; 1365 1366 if (j) 1367 trace_xfs_alloc_near_greater(args); 1368 else 1369 trace_xfs_alloc_near_lesser(args); 1370 1371 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1372 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR); 1373 return 0; 1374 1375 error0: 1376 trace_xfs_alloc_near_error(args); 1377 if (cnt_cur != NULL) 1378 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1379 if (bno_cur_lt != NULL) 1380 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR); 1381 if (bno_cur_gt != NULL) 1382 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR); 1383 return error; 1384 } 1385 1386 /* 1387 * Allocate a variable extent anywhere in the allocation group agno. 1388 * Extent's length (returned in len) will be between minlen and maxlen, 1389 * and of the form k * prod + mod unless there's nothing that large. 1390 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 
1391 */ 1392 STATIC int /* error */ 1393 xfs_alloc_ag_vextent_size( 1394 xfs_alloc_arg_t *args) /* allocation argument structure */ 1395 { 1396 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */ 1397 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */ 1398 int error; /* error result */ 1399 xfs_agblock_t fbno; /* start of found freespace */ 1400 xfs_extlen_t flen; /* length of found freespace */ 1401 int i; /* temp status variable */ 1402 xfs_agblock_t rbno; /* returned block number */ 1403 xfs_extlen_t rlen; /* length of returned extent */ 1404 bool busy; 1405 unsigned busy_gen; 1406 1407 restart: 1408 /* 1409 * Allocate and initialize a cursor for the by-size btree. 1410 */ 1411 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 1412 args->agno, XFS_BTNUM_CNT); 1413 bno_cur = NULL; 1414 busy = false; 1415 1416 /* 1417 * Look for an entry >= maxlen+alignment-1 blocks. 1418 */ 1419 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, 1420 args->maxlen + args->alignment - 1, &i))) 1421 goto error0; 1422 1423 /* 1424 * If none then we have to settle for a smaller extent. In the case that 1425 * there are no large extents, this will return the last entry in the 1426 * tree unless the tree is empty. In the case that there are only busy 1427 * large extents, this will return the largest small extent unless there 1428 * are no smaller extents available. 1429 */ 1430 if (!i) { 1431 error = xfs_alloc_ag_vextent_small(args, cnt_cur, 1432 &fbno, &flen, &i); 1433 if (error) 1434 goto error0; 1435 if (i == 0 || flen == 0) { 1436 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1437 trace_xfs_alloc_size_noentry(args); 1438 return 0; 1439 } 1440 ASSERT(i == 1); 1441 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno, 1442 &rlen, &busy_gen); 1443 } else { 1444 /* 1445 * Search for a non-busy extent that is large enough. 1446 */ 1447 for (;;) { 1448 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i); 1449 if (error) 1450 goto error0; 1451 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1452 1453 busy = xfs_alloc_compute_aligned(args, fbno, flen, 1454 &rbno, &rlen, &busy_gen); 1455 1456 if (rlen >= args->maxlen) 1457 break; 1458 1459 error = xfs_btree_increment(cnt_cur, 0, &i); 1460 if (error) 1461 goto error0; 1462 if (i == 0) { 1463 /* 1464 * Our only valid extents must have been busy. 1465 * Make it unbusy by forcing the log out and 1466 * retrying. 1467 */ 1468 xfs_btree_del_cursor(cnt_cur, 1469 XFS_BTREE_NOERROR); 1470 trace_xfs_alloc_size_busy(args); 1471 xfs_extent_busy_flush(args->mp, 1472 args->pag, busy_gen); 1473 goto restart; 1474 } 1475 } 1476 } 1477 1478 /* 1479 * In the first case above, we got the last entry in the 1480 * by-size btree. Now we check to see if the space hits maxlen 1481 * once aligned; if not, we search left for something better. 1482 * This can't happen in the second case above. 
1483 */ 1484 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); 1485 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 || 1486 (rlen <= flen && rbno + rlen <= fbno + flen), error0); 1487 if (rlen < args->maxlen) { 1488 xfs_agblock_t bestfbno; 1489 xfs_extlen_t bestflen; 1490 xfs_agblock_t bestrbno; 1491 xfs_extlen_t bestrlen; 1492 1493 bestrlen = rlen; 1494 bestrbno = rbno; 1495 bestflen = flen; 1496 bestfbno = fbno; 1497 for (;;) { 1498 if ((error = xfs_btree_decrement(cnt_cur, 0, &i))) 1499 goto error0; 1500 if (i == 0) 1501 break; 1502 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, 1503 &i))) 1504 goto error0; 1505 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1506 if (flen < bestrlen) 1507 break; 1508 busy = xfs_alloc_compute_aligned(args, fbno, flen, 1509 &rbno, &rlen, &busy_gen); 1510 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); 1511 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 || 1512 (rlen <= flen && rbno + rlen <= fbno + flen), 1513 error0); 1514 if (rlen > bestrlen) { 1515 bestrlen = rlen; 1516 bestrbno = rbno; 1517 bestflen = flen; 1518 bestfbno = fbno; 1519 if (rlen == args->maxlen) 1520 break; 1521 } 1522 } 1523 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen, 1524 &i))) 1525 goto error0; 1526 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1527 rlen = bestrlen; 1528 rbno = bestrbno; 1529 flen = bestflen; 1530 fbno = bestfbno; 1531 } 1532 args->wasfromfl = 0; 1533 /* 1534 * Fix up the length. 1535 */ 1536 args->len = rlen; 1537 if (rlen < args->minlen) { 1538 if (busy) { 1539 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1540 trace_xfs_alloc_size_busy(args); 1541 xfs_extent_busy_flush(args->mp, args->pag, busy_gen); 1542 goto restart; 1543 } 1544 goto out_nominleft; 1545 } 1546 xfs_alloc_fix_len(args); 1547 1548 rlen = args->len; 1549 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0); 1550 /* 1551 * Allocate and initialize a cursor for the by-block tree. 1552 */ 1553 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 1554 args->agno, XFS_BTNUM_BNO); 1555 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, 1556 rbno, rlen, XFSA_FIXUP_CNT_OK))) 1557 goto error0; 1558 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1559 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 1560 cnt_cur = bno_cur = NULL; 1561 args->len = rlen; 1562 args->agbno = rbno; 1563 XFS_WANT_CORRUPTED_GOTO(args->mp, 1564 args->agbno + args->len <= 1565 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), 1566 error0); 1567 trace_xfs_alloc_size_done(args); 1568 return 0; 1569 1570 error0: 1571 trace_xfs_alloc_size_error(args); 1572 if (cnt_cur) 1573 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1574 if (bno_cur) 1575 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 1576 return error; 1577 1578 out_nominleft: 1579 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1580 trace_xfs_alloc_size_nominleft(args); 1581 args->agbno = NULLAGBLOCK; 1582 return 0; 1583 } 1584 1585 /* 1586 * Deal with the case where only small freespaces remain. 1587 * Either return the contents of the last freespace record, 1588 * or allocate space from the freelist if there is nothing in the tree. 
1589 */ 1590 STATIC int /* error */ 1591 xfs_alloc_ag_vextent_small( 1592 xfs_alloc_arg_t *args, /* allocation argument structure */ 1593 xfs_btree_cur_t *ccur, /* by-size cursor */ 1594 xfs_agblock_t *fbnop, /* result block number */ 1595 xfs_extlen_t *flenp, /* result length */ 1596 int *stat) /* status: 0-freelist, 1-normal/none */ 1597 { 1598 int error; 1599 xfs_agblock_t fbno; 1600 xfs_extlen_t flen; 1601 int i; 1602 1603 if ((error = xfs_btree_decrement(ccur, 0, &i))) 1604 goto error0; 1605 if (i) { 1606 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i))) 1607 goto error0; 1608 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1609 } 1610 /* 1611 * Nothing in the btree, try the freelist. Make sure 1612 * to respect minleft even when pulling from the 1613 * freelist. 1614 */ 1615 else if (args->minlen == 1 && args->alignment == 1 && 1616 args->resv != XFS_AG_RESV_AGFL && 1617 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) 1618 > args->minleft)) { 1619 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0); 1620 if (error) 1621 goto error0; 1622 if (fbno != NULLAGBLOCK) { 1623 xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1, 1624 xfs_alloc_allow_busy_reuse(args->datatype)); 1625 1626 if (xfs_alloc_is_userdata(args->datatype)) { 1627 xfs_buf_t *bp; 1628 1629 bp = xfs_btree_get_bufs(args->mp, args->tp, 1630 args->agno, fbno, 0); 1631 if (!bp) { 1632 error = -EFSCORRUPTED; 1633 goto error0; 1634 } 1635 xfs_trans_binval(args->tp, bp); 1636 } 1637 args->len = 1; 1638 args->agbno = fbno; 1639 XFS_WANT_CORRUPTED_GOTO(args->mp, 1640 args->agbno + args->len <= 1641 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length), 1642 error0); 1643 args->wasfromfl = 1; 1644 trace_xfs_alloc_small_freelist(args); 1645 1646 /* 1647 * If we're feeding an AGFL block to something that 1648 * doesn't live in the free space, we need to clear 1649 * out the OWN_AG rmap. 1650 */ 1651 error = xfs_rmap_free(args->tp, args->agbp, args->agno, 1652 fbno, 1, &XFS_RMAP_OINFO_AG); 1653 if (error) 1654 goto error0; 1655 1656 *stat = 0; 1657 return 0; 1658 } 1659 /* 1660 * Nothing in the freelist. 1661 */ 1662 else 1663 flen = 0; 1664 } 1665 /* 1666 * Can't allocate from the freelist for some reason. 1667 */ 1668 else { 1669 fbno = NULLAGBLOCK; 1670 flen = 0; 1671 } 1672 /* 1673 * Can't do the allocation, give up. 1674 */ 1675 if (flen < args->minlen) { 1676 args->agbno = NULLAGBLOCK; 1677 trace_xfs_alloc_small_notenough(args); 1678 flen = 0; 1679 } 1680 *fbnop = fbno; 1681 *flenp = flen; 1682 *stat = 1; 1683 trace_xfs_alloc_small_done(args); 1684 return 0; 1685 1686 error0: 1687 trace_xfs_alloc_small_error(args); 1688 return error; 1689 } 1690 1691 /* 1692 * Free the extent starting at agno/bno for length. 
1693 */ 1694 STATIC int 1695 xfs_free_ag_extent( 1696 struct xfs_trans *tp, 1697 struct xfs_buf *agbp, 1698 xfs_agnumber_t agno, 1699 xfs_agblock_t bno, 1700 xfs_extlen_t len, 1701 const struct xfs_owner_info *oinfo, 1702 enum xfs_ag_resv_type type) 1703 { 1704 struct xfs_mount *mp; 1705 struct xfs_perag *pag; 1706 struct xfs_btree_cur *bno_cur; 1707 struct xfs_btree_cur *cnt_cur; 1708 xfs_agblock_t gtbno; /* start of right neighbor */ 1709 xfs_extlen_t gtlen; /* length of right neighbor */ 1710 xfs_agblock_t ltbno; /* start of left neighbor */ 1711 xfs_extlen_t ltlen; /* length of left neighbor */ 1712 xfs_agblock_t nbno; /* new starting block of freesp */ 1713 xfs_extlen_t nlen; /* new length of freespace */ 1714 int haveleft; /* have a left neighbor */ 1715 int haveright; /* have a right neighbor */ 1716 int i; 1717 int error; 1718 1719 bno_cur = cnt_cur = NULL; 1720 mp = tp->t_mountp; 1721 1722 if (!xfs_rmap_should_skip_owner_update(oinfo)) { 1723 error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo); 1724 if (error) 1725 goto error0; 1726 } 1727 1728 /* 1729 * Allocate and initialize a cursor for the by-block btree. 1730 */ 1731 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO); 1732 /* 1733 * Look for a neighboring block on the left (lower block numbers) 1734 * that is contiguous with this space. 1735 */ 1736 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft))) 1737 goto error0; 1738 if (haveleft) { 1739 /* 1740 * There is a block to our left. 1741 */ 1742 if ((error = xfs_alloc_get_rec(bno_cur, <bno, <len, &i))) 1743 goto error0; 1744 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1745 /* 1746 * It's not contiguous, though. 1747 */ 1748 if (ltbno + ltlen < bno) 1749 haveleft = 0; 1750 else { 1751 /* 1752 * If this failure happens the request to free this 1753 * space was invalid, it's (partly) already free. 1754 * Very bad. 1755 */ 1756 XFS_WANT_CORRUPTED_GOTO(mp, 1757 ltbno + ltlen <= bno, error0); 1758 } 1759 } 1760 /* 1761 * Look for a neighboring block on the right (higher block numbers) 1762 * that is contiguous with this space. 1763 */ 1764 if ((error = xfs_btree_increment(bno_cur, 0, &haveright))) 1765 goto error0; 1766 if (haveright) { 1767 /* 1768 * There is a block to our right. 1769 */ 1770 if ((error = xfs_alloc_get_rec(bno_cur, >bno, >len, &i))) 1771 goto error0; 1772 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1773 /* 1774 * It's not contiguous, though. 1775 */ 1776 if (bno + len < gtbno) 1777 haveright = 0; 1778 else { 1779 /* 1780 * If this failure happens the request to free this 1781 * space was invalid, it's (partly) already free. 1782 * Very bad. 1783 */ 1784 XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0); 1785 } 1786 } 1787 /* 1788 * Now allocate and initialize a cursor for the by-size tree. 1789 */ 1790 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT); 1791 /* 1792 * Have both left and right contiguous neighbors. 1793 * Merge all three into a single free block. 1794 */ 1795 if (haveleft && haveright) { 1796 /* 1797 * Delete the old by-size entry on the left. 1798 */ 1799 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) 1800 goto error0; 1801 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1802 if ((error = xfs_btree_delete(cnt_cur, &i))) 1803 goto error0; 1804 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1805 /* 1806 * Delete the old by-size entry on the right. 
1807 */ 1808 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) 1809 goto error0; 1810 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1811 if ((error = xfs_btree_delete(cnt_cur, &i))) 1812 goto error0; 1813 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1814 /* 1815 * Delete the old by-block entry for the right block. 1816 */ 1817 if ((error = xfs_btree_delete(bno_cur, &i))) 1818 goto error0; 1819 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1820 /* 1821 * Move the by-block cursor back to the left neighbor. 1822 */ 1823 if ((error = xfs_btree_decrement(bno_cur, 0, &i))) 1824 goto error0; 1825 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1826 #ifdef DEBUG 1827 /* 1828 * Check that this is the right record: delete didn't 1829 * mangle the cursor. 1830 */ 1831 { 1832 xfs_agblock_t xxbno; 1833 xfs_extlen_t xxlen; 1834 1835 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen, 1836 &i))) 1837 goto error0; 1838 XFS_WANT_CORRUPTED_GOTO(mp, 1839 i == 1 && xxbno == ltbno && xxlen == ltlen, 1840 error0); 1841 } 1842 #endif 1843 /* 1844 * Update remaining by-block entry to the new, joined block. 1845 */ 1846 nbno = ltbno; 1847 nlen = len + ltlen + gtlen; 1848 if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) 1849 goto error0; 1850 } 1851 /* 1852 * Have only a left contiguous neighbor. 1853 * Merge it together with the new freespace. 1854 */ 1855 else if (haveleft) { 1856 /* 1857 * Delete the old by-size entry on the left. 1858 */ 1859 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) 1860 goto error0; 1861 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1862 if ((error = xfs_btree_delete(cnt_cur, &i))) 1863 goto error0; 1864 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1865 /* 1866 * Back up the by-block cursor to the left neighbor, and 1867 * update its length. 1868 */ 1869 if ((error = xfs_btree_decrement(bno_cur, 0, &i))) 1870 goto error0; 1871 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1872 nbno = ltbno; 1873 nlen = len + ltlen; 1874 if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) 1875 goto error0; 1876 } 1877 /* 1878 * Have only a right contiguous neighbor. 1879 * Merge it together with the new freespace. 1880 */ 1881 else if (haveright) { 1882 /* 1883 * Delete the old by-size entry on the right. 1884 */ 1885 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) 1886 goto error0; 1887 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1888 if ((error = xfs_btree_delete(cnt_cur, &i))) 1889 goto error0; 1890 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1891 /* 1892 * Update the starting block and length of the right 1893 * neighbor in the by-block tree. 1894 */ 1895 nbno = bno; 1896 nlen = len + gtlen; 1897 if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) 1898 goto error0; 1899 } 1900 /* 1901 * No contiguous neighbors. 1902 * Insert the new freespace into the by-block tree. 1903 */ 1904 else { 1905 nbno = bno; 1906 nlen = len; 1907 if ((error = xfs_btree_insert(bno_cur, &i))) 1908 goto error0; 1909 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1910 } 1911 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 1912 bno_cur = NULL; 1913 /* 1914 * In all cases we need to insert the new freespace in the by-size tree. 
1915 */ 1916 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i))) 1917 goto error0; 1918 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0); 1919 if ((error = xfs_btree_insert(cnt_cur, &i))) 1920 goto error0; 1921 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0); 1922 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1923 cnt_cur = NULL; 1924 1925 /* 1926 * Update the freespace totals in the ag and superblock. 1927 */ 1928 pag = xfs_perag_get(mp, agno); 1929 error = xfs_alloc_update_counters(tp, pag, agbp, len); 1930 xfs_ag_resv_free_extent(pag, type, tp, len); 1931 xfs_perag_put(pag); 1932 if (error) 1933 goto error0; 1934 1935 XFS_STATS_INC(mp, xs_freex); 1936 XFS_STATS_ADD(mp, xs_freeb, len); 1937 1938 trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright); 1939 1940 return 0; 1941 1942 error0: 1943 trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1); 1944 if (bno_cur) 1945 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); 1946 if (cnt_cur) 1947 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 1948 return error; 1949 } 1950 1951 /* 1952 * Visible (exported) allocation/free functions. 1953 * Some of these are used just by xfs_alloc_btree.c and this file. 1954 */ 1955 1956 /* 1957 * Compute and fill in value of m_ag_maxlevels. 1958 */ 1959 void 1960 xfs_alloc_compute_maxlevels( 1961 xfs_mount_t *mp) /* file system mount structure */ 1962 { 1963 mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr, 1964 (mp->m_sb.sb_agblocks + 1) / 2); 1965 } 1966 1967 /* 1968 * Find the length of the longest extent in an AG. The 'need' parameter 1969 * specifies how much space we're going to need for the AGFL and the 1970 * 'reserved' parameter tells us how many blocks in this AG are reserved for 1971 * other callers. 1972 */ 1973 xfs_extlen_t 1974 xfs_alloc_longest_free_extent( 1975 struct xfs_perag *pag, 1976 xfs_extlen_t need, 1977 xfs_extlen_t reserved) 1978 { 1979 xfs_extlen_t delta = 0; 1980 1981 /* 1982 * If the AGFL needs a recharge, we'll have to subtract that from the 1983 * longest extent. 1984 */ 1985 if (need > pag->pagf_flcount) 1986 delta = need - pag->pagf_flcount; 1987 1988 /* 1989 * If we cannot maintain others' reservations with space from the 1990 * not-longest freesp extents, we'll have to subtract /that/ from 1991 * the longest extent too. 1992 */ 1993 if (pag->pagf_freeblks - pag->pagf_longest < reserved) 1994 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest); 1995 1996 /* 1997 * If the longest extent is long enough to satisfy all the 1998 * reservations and AGFL rules in place, we can return this extent. 1999 */ 2000 if (pag->pagf_longest > delta) 2001 return pag->pagf_longest - delta; 2002 2003 /* Otherwise, let the caller try for 1 block if there's space. 
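	 * The expression below intentionally evaluates to 1 or 0: one block
	 * may be usable if either the AGFL or the free space btrees still
	 * hold anything, otherwise there is no space at all.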
*/ 2004 return pag->pagf_flcount > 0 || pag->pagf_longest > 0; 2005 } 2006 2007 unsigned int 2008 xfs_alloc_min_freelist( 2009 struct xfs_mount *mp, 2010 struct xfs_perag *pag) 2011 { 2012 unsigned int min_free; 2013 2014 /* space needed by-bno freespace btree */ 2015 min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1, 2016 mp->m_ag_maxlevels); 2017 /* space needed by-size freespace btree */ 2018 min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1, 2019 mp->m_ag_maxlevels); 2020 /* space needed reverse mapping used space btree */ 2021 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) 2022 min_free += min_t(unsigned int, 2023 pag->pagf_levels[XFS_BTNUM_RMAPi] + 1, 2024 mp->m_rmap_maxlevels); 2025 2026 return min_free; 2027 } 2028 2029 /* 2030 * Check if the operation we are fixing up the freelist for should go ahead or 2031 * not. If we are freeing blocks, we always allow it, otherwise the allocation 2032 * is dependent on whether the size and shape of free space available will 2033 * permit the requested allocation to take place. 2034 */ 2035 static bool 2036 xfs_alloc_space_available( 2037 struct xfs_alloc_arg *args, 2038 xfs_extlen_t min_free, 2039 int flags) 2040 { 2041 struct xfs_perag *pag = args->pag; 2042 xfs_extlen_t alloc_len, longest; 2043 xfs_extlen_t reservation; /* blocks that are still reserved */ 2044 int available; 2045 2046 if (flags & XFS_ALLOC_FLAG_FREEING) 2047 return true; 2048 2049 reservation = xfs_ag_resv_needed(pag, args->resv); 2050 2051 /* do we have enough contiguous free space for the allocation? */ 2052 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop; 2053 longest = xfs_alloc_longest_free_extent(pag, min_free, reservation); 2054 if (longest < alloc_len) 2055 return false; 2056 2057 /* do we have enough free space remaining for the allocation? */ 2058 available = (int)(pag->pagf_freeblks + pag->pagf_flcount - 2059 reservation - min_free - args->minleft); 2060 if (available < (int)max(args->total, alloc_len)) 2061 return false; 2062 2063 /* 2064 * Clamp maxlen to the amount of free space available for the actual 2065 * extent allocation. 2066 */ 2067 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) { 2068 args->maxlen = available; 2069 ASSERT(args->maxlen > 0); 2070 ASSERT(args->maxlen >= args->minlen); 2071 } 2072 2073 return true; 2074 } 2075 2076 int 2077 xfs_free_agfl_block( 2078 struct xfs_trans *tp, 2079 xfs_agnumber_t agno, 2080 xfs_agblock_t agbno, 2081 struct xfs_buf *agbp, 2082 struct xfs_owner_info *oinfo) 2083 { 2084 int error; 2085 struct xfs_buf *bp; 2086 2087 error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo, 2088 XFS_AG_RESV_AGFL); 2089 if (error) 2090 return error; 2091 2092 bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno, 0); 2093 if (!bp) 2094 return -EFSCORRUPTED; 2095 xfs_trans_binval(tp, bp); 2096 2097 return 0; 2098 } 2099 2100 /* 2101 * Check the agfl fields of the agf for inconsistency or corruption. The purpose 2102 * is to detect an agfl header padding mismatch between current and early v5 2103 * kernels. This problem manifests as a 1-slot size difference between the 2104 * on-disk flcount and the active [first, last] range of a wrapped agfl. This 2105 * may also catch variants of agfl count corruption unrelated to padding. Either 2106 * way, we'll reset the agfl and warn the user. 2107 * 2108 * Return true if a reset is required before the agfl can be used, false 2109 * otherwise. 
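 *
 * As a worked example (illustrative values): with xfs_agfl_size() == 118,
 * flfirst == 100 and fllast == 3, the active range wraps and spans
 * (118 - 100) + 3 + 1 == 22 slots, so an on-disk flcount of anything other
 * than 22 is inconsistent and forces a reset.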
2110 */ 2111 static bool 2112 xfs_agfl_needs_reset( 2113 struct xfs_mount *mp, 2114 struct xfs_agf *agf) 2115 { 2116 uint32_t f = be32_to_cpu(agf->agf_flfirst); 2117 uint32_t l = be32_to_cpu(agf->agf_fllast); 2118 uint32_t c = be32_to_cpu(agf->agf_flcount); 2119 int agfl_size = xfs_agfl_size(mp); 2120 int active; 2121 2122 /* no agfl header on v4 supers */ 2123 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2124 return false; 2125 2126 /* 2127 * The agf read verifier catches severe corruption of these fields. 2128 * Repeat some sanity checks to cover a packed -> unpacked mismatch if 2129 * the verifier allows it. 2130 */ 2131 if (f >= agfl_size || l >= agfl_size) 2132 return true; 2133 if (c > agfl_size) 2134 return true; 2135 2136 /* 2137 * Check consistency between the on-disk count and the active range. An 2138 * agfl padding mismatch manifests as an inconsistent flcount. 2139 */ 2140 if (c && l >= f) 2141 active = l - f + 1; 2142 else if (c) 2143 active = agfl_size - f + l + 1; 2144 else 2145 active = 0; 2146 2147 return active != c; 2148 } 2149 2150 /* 2151 * Reset the agfl to an empty state. Ignore/drop any existing blocks since the 2152 * agfl content cannot be trusted. Warn the user that a repair is required to 2153 * recover leaked blocks. 2154 * 2155 * The purpose of this mechanism is to handle filesystems affected by the agfl 2156 * header padding mismatch problem. A reset keeps the filesystem online with a 2157 * relatively minor free space accounting inconsistency rather than suffer the 2158 * inevitable crash from use of an invalid agfl block. 2159 */ 2160 static void 2161 xfs_agfl_reset( 2162 struct xfs_trans *tp, 2163 struct xfs_buf *agbp, 2164 struct xfs_perag *pag) 2165 { 2166 struct xfs_mount *mp = tp->t_mountp; 2167 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); 2168 2169 ASSERT(pag->pagf_agflreset); 2170 trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_); 2171 2172 xfs_warn(mp, 2173 "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. " 2174 "Please unmount and run xfs_repair.", 2175 pag->pag_agno, pag->pagf_flcount); 2176 2177 agf->agf_flfirst = 0; 2178 agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1); 2179 agf->agf_flcount = 0; 2180 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST | 2181 XFS_AGF_FLCOUNT); 2182 2183 pag->pagf_flcount = 0; 2184 pag->pagf_agflreset = false; 2185 } 2186 2187 /* 2188 * Defer an AGFL block free. This is effectively equivalent to 2189 * xfs_bmap_add_free() with some special handling particular to AGFL blocks. 2190 * 2191 * Deferring AGFL frees helps prevent log reservation overruns due to too many 2192 * allocation operations in a transaction. AGFL frees are prone to this problem 2193 * because for one they are always freed one at a time. Further, an immediate 2194 * AGFL block free can cause a btree join and require another block free before 2195 * the real allocation can proceed. Deferring the free disconnects freeing up 2196 * the AGFL slot from freeing the block. 
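 *
 * (Illustrative note: the deferred item is queued below as an
 * XFS_DEFER_OPS_TYPE_AGFL_FREE work item, and the block is not actually
 * freed until the transaction's deferred operations are finished.)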
 */
STATIC void
xfs_defer_agfl_block(
	struct xfs_trans		*tp,
	xfs_agnumber_t			agno,
	xfs_fsblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_extent_free_item	*new;	/* new element */

	ASSERT(xfs_bmap_free_item_zone != NULL);
	ASSERT(oinfo != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
	new->xefi_blockcount = 1;
	new->xefi_oinfo = *oinfo;

	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}

/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */
int			/* error */
xfs_alloc_fix_freelist(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	int			flags)	/* XFS_ALLOC_FLAG_... */
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag = args->pag;
	struct xfs_trans	*tp = args->tp;
	struct xfs_buf		*agbp = NULL;
	struct xfs_buf		*agflbp = NULL;
	struct xfs_alloc_arg	targs;	/* local allocation arguments */
	xfs_agblock_t		bno;	/* freelist block */
	xfs_extlen_t		need;	/* total blocks needed in freelist */
	int			error = 0;

	if (!pag->pagf_init) {
		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
		if (error)
			goto out_no_agbp;
		if (!pag->pagf_init) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			goto out_agbp_relse;
		}
	}

	/*
	 * If this is a metadata preferred pag and we are allocating user
	 * data, then try somewhere else if we are not being asked to try
	 * harder at this point.
	 */
	if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
		goto out_agbp_relse;
	}

	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags |
			XFS_ALLOC_FLAG_CHECK))
		goto out_agbp_relse;

	/*
	 * Get the a.g. freespace buffer.
	 * Can fail if we're not blocking on locks, and it's held.
	 */
	if (!agbp) {
		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
		if (error)
			goto out_no_agbp;
		if (!agbp) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			goto out_no_agbp;
		}
	}

	/* reset a padding mismatched agfl before final free space check */
	if (pag->pagf_agflreset)
		xfs_agfl_reset(tp, agbp, pag);

	/*
	 * If there isn't enough total space, or there isn't a large enough
	 * single extent, reject this AG.
	 */
	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags))
		goto out_agbp_relse;

	/*
	 * Make the freelist shorter if it's too long.
	 *
	 * Note that from this point onwards, we will always release the agf and
	 * agfl buffers on error. This handles the case where we error out and
	 * the buffers are clean or may not have been joined to the transaction
	 * and hence need to be released manually. If they have been joined to
	 * the transaction, then xfs_trans_brelse() will handle them
	 * appropriately based on the recursion count and dirty state of the
	 * buffer.
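	 *
	 * (Each block removed from an overlong freelist in the loop below is
	 * freed via xfs_defer_agfl_block() rather than immediately, for the
	 * log reservation reasons described above that function.)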
	 *
	 * XXX (dgc): When we have lots of free space, does this buy us
	 * anything other than extra overhead when we need to put more blocks
	 * back on the free list? Maybe we should only do this when space is
	 * getting low or the AGFL is more than half full?
	 *
	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
	 * updating the rmapbt. Both flags are used in xfs_repair while we're
	 * rebuilding the rmapbt, and neither are used by the kernel. They're
	 * both required to ensure that rmaps are correctly recorded for the
	 * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
	 * repair/rmap.c in xfsprogs for details.
	 */
	memset(&targs, 0, sizeof(targs));
	/* struct copy below */
	if (flags & XFS_ALLOC_FLAG_NORMAP)
		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	else
		targs.oinfo = XFS_RMAP_OINFO_AG;
	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
		if (error)
			goto out_agbp_relse;

		/* defer agfl frees */
		xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
	}

	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.alignment = targs.minlen = targs.prod = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
	if (error)
		goto out_agbp_relse;

	/* Make the freelist longer if it's too short. */
	while (pag->pagf_flcount < need) {
		targs.agbno = 0;
		targs.maxlen = need - pag->pagf_flcount;
		targs.resv = XFS_AG_RESV_AGFL;

		/* Allocate as many blocks as possible at once. */
		error = xfs_alloc_ag_vextent(&targs);
		if (error)
			goto out_agflbp_relse;

		/*
		 * Stop if we run out. Won't happen if callers are obeying
		 * the restrictions correctly. Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK) {
			if (flags & XFS_ALLOC_FLAG_FREEING)
				break;
			goto out_agflbp_relse;
		}
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			error = xfs_alloc_put_freelist(tp, agbp,
							agflbp, bno, 0);
			if (error)
				goto out_agflbp_relse;
		}
	}
	xfs_trans_brelse(tp, agflbp);
	args->agbp = agbp;
	return 0;

out_agflbp_relse:
	xfs_trans_brelse(tp, agflbp);
out_agbp_relse:
	if (agbp)
		xfs_trans_brelse(tp, agbp);
out_no_agbp:
	args->agbp = NULL;
	return error;
}

/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 */
int				/* error */
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
	int		btreeblk) /* destination is an AGF btree */
{
	xfs_agf_t	*agf;	/* a.g. freespace structure */
	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
	xfs_agblock_t	bno;	/* block number returned */
	__be32		*agfl_bno;
	int		error;
	int		logflags;
	xfs_mount_t	*mp = tp->t_mountp;
	xfs_perag_t	*pag;	/* per allocation group data */

	/*
	 * Freelist is empty, give up.
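	 * (A NULLAGBLOCK result with a 0 return value is the "no block
	 * available" signal; callers must check *bnop for NULLAGBLOCK rather
	 * than relying on the error code.)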
2408 */ 2409 agf = XFS_BUF_TO_AGF(agbp); 2410 if (!agf->agf_flcount) { 2411 *bnop = NULLAGBLOCK; 2412 return 0; 2413 } 2414 /* 2415 * Read the array of free blocks. 2416 */ 2417 error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno), 2418 &agflbp); 2419 if (error) 2420 return error; 2421 2422 2423 /* 2424 * Get the block number and update the data structures. 2425 */ 2426 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp); 2427 bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]); 2428 be32_add_cpu(&agf->agf_flfirst, 1); 2429 xfs_trans_brelse(tp, agflbp); 2430 if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp)) 2431 agf->agf_flfirst = 0; 2432 2433 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); 2434 ASSERT(!pag->pagf_agflreset); 2435 be32_add_cpu(&agf->agf_flcount, -1); 2436 xfs_trans_agflist_delta(tp, -1); 2437 pag->pagf_flcount--; 2438 2439 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; 2440 if (btreeblk) { 2441 be32_add_cpu(&agf->agf_btreeblks, 1); 2442 pag->pagf_btreeblks++; 2443 logflags |= XFS_AGF_BTREEBLKS; 2444 } 2445 xfs_perag_put(pag); 2446 2447 xfs_alloc_log_agf(tp, agbp, logflags); 2448 *bnop = bno; 2449 2450 return 0; 2451 } 2452 2453 /* 2454 * Log the given fields from the agf structure. 2455 */ 2456 void 2457 xfs_alloc_log_agf( 2458 xfs_trans_t *tp, /* transaction pointer */ 2459 xfs_buf_t *bp, /* buffer for a.g. freelist header */ 2460 int fields) /* mask of fields to be logged (XFS_AGF_...) */ 2461 { 2462 int first; /* first byte offset */ 2463 int last; /* last byte offset */ 2464 static const short offsets[] = { 2465 offsetof(xfs_agf_t, agf_magicnum), 2466 offsetof(xfs_agf_t, agf_versionnum), 2467 offsetof(xfs_agf_t, agf_seqno), 2468 offsetof(xfs_agf_t, agf_length), 2469 offsetof(xfs_agf_t, agf_roots[0]), 2470 offsetof(xfs_agf_t, agf_levels[0]), 2471 offsetof(xfs_agf_t, agf_flfirst), 2472 offsetof(xfs_agf_t, agf_fllast), 2473 offsetof(xfs_agf_t, agf_flcount), 2474 offsetof(xfs_agf_t, agf_freeblks), 2475 offsetof(xfs_agf_t, agf_longest), 2476 offsetof(xfs_agf_t, agf_btreeblks), 2477 offsetof(xfs_agf_t, agf_uuid), 2478 offsetof(xfs_agf_t, agf_rmap_blocks), 2479 offsetof(xfs_agf_t, agf_refcount_blocks), 2480 offsetof(xfs_agf_t, agf_refcount_root), 2481 offsetof(xfs_agf_t, agf_refcount_level), 2482 /* needed so that we don't log the whole rest of the structure: */ 2483 offsetof(xfs_agf_t, agf_spare64), 2484 sizeof(xfs_agf_t) 2485 }; 2486 2487 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_); 2488 2489 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF); 2490 2491 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); 2492 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); 2493 } 2494 2495 /* 2496 * Interface for inode allocation to force the pag data to be initialized. 2497 */ 2498 int /* error */ 2499 xfs_alloc_pagf_init( 2500 xfs_mount_t *mp, /* file system mount structure */ 2501 xfs_trans_t *tp, /* transaction pointer */ 2502 xfs_agnumber_t agno, /* allocation group number */ 2503 int flags) /* XFS_ALLOC_FLAGS_... */ 2504 { 2505 xfs_buf_t *bp; 2506 int error; 2507 2508 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp))) 2509 return error; 2510 if (bp) 2511 xfs_trans_brelse(tp, bp); 2512 return 0; 2513 } 2514 2515 /* 2516 * Put the block on the freelist for the allocation group. 2517 */ 2518 int /* error */ 2519 xfs_alloc_put_freelist( 2520 xfs_trans_t *tp, /* transaction pointer */ 2521 xfs_buf_t *agbp, /* buffer for a.g. freelist header */ 2522 xfs_buf_t *agflbp,/* buffer for a.g. 
free block array */
	xfs_agblock_t	bno,	/* block being freed */
	int		btreeblk) /* block came from an AGF btree */
{
	xfs_agf_t	*agf;	/* a.g. freespace structure */
	__be32		*blockp;/* pointer to array entry */
	int		error;
	int		logflags;
	xfs_mount_t	*mp;	/* mount structure */
	xfs_perag_t	*pag;	/* per allocation group data */
	__be32		*agfl_bno;
	int		startoff;

	agf = XFS_BUF_TO_AGF(agbp);
	mp = tp->t_mountp;

	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	be32_add_cpu(&agf->agf_fllast, 1);
	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
		agf->agf_fllast = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	ASSERT(!pag->pagf_agflreset);
	be32_add_cpu(&agf->agf_flcount, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;

	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, -1);
		pag->pagf_btreeblks--;
		logflags |= XFS_AGF_BTREEBLKS;
	}
	xfs_perag_put(pag);

	xfs_alloc_log_agf(tp, agbp, logflags);

	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
	*blockp = cpu_to_be32(bno);
	startoff = (char *)blockp - (char *)agflbp->b_addr;

	xfs_alloc_log_agf(tp, agbp, logflags);

	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
	xfs_trans_log_buf(tp, agflbp, startoff,
			  startoff + sizeof(xfs_agblock_t) - 1);
	return 0;
}

static xfs_failaddr_t
xfs_agf_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp,
				be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
			return __this_address;
	}

	if (!xfs_verify_magic(bp, agf->agf_magicnum))
		return __this_address;

	if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
		return __this_address;

	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
		return __this_address;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
		return __this_address;

	/*
	 * During growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
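	 * (Hence the bp->b_pag check below: uncached buffers carry no perag,
	 * so the agf_seqno cross-check is simply skipped for them.)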
2617 */ 2618 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno) 2619 return __this_address; 2620 2621 if (xfs_sb_version_haslazysbcount(&mp->m_sb) && 2622 be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) 2623 return __this_address; 2624 2625 if (xfs_sb_version_hasreflink(&mp->m_sb) && 2626 (be32_to_cpu(agf->agf_refcount_level) < 1 || 2627 be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)) 2628 return __this_address; 2629 2630 return NULL; 2631 2632 } 2633 2634 static void 2635 xfs_agf_read_verify( 2636 struct xfs_buf *bp) 2637 { 2638 struct xfs_mount *mp = bp->b_target->bt_mount; 2639 xfs_failaddr_t fa; 2640 2641 if (xfs_sb_version_hascrc(&mp->m_sb) && 2642 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF)) 2643 xfs_verifier_error(bp, -EFSBADCRC, __this_address); 2644 else { 2645 fa = xfs_agf_verify(bp); 2646 if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF)) 2647 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2648 } 2649 } 2650 2651 static void 2652 xfs_agf_write_verify( 2653 struct xfs_buf *bp) 2654 { 2655 struct xfs_mount *mp = bp->b_target->bt_mount; 2656 struct xfs_buf_log_item *bip = bp->b_log_item; 2657 xfs_failaddr_t fa; 2658 2659 fa = xfs_agf_verify(bp); 2660 if (fa) { 2661 xfs_verifier_error(bp, -EFSCORRUPTED, fa); 2662 return; 2663 } 2664 2665 if (!xfs_sb_version_hascrc(&mp->m_sb)) 2666 return; 2667 2668 if (bip) 2669 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn); 2670 2671 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF); 2672 } 2673 2674 const struct xfs_buf_ops xfs_agf_buf_ops = { 2675 .name = "xfs_agf", 2676 .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) }, 2677 .verify_read = xfs_agf_read_verify, 2678 .verify_write = xfs_agf_write_verify, 2679 .verify_struct = xfs_agf_verify, 2680 }; 2681 2682 /* 2683 * Read in the allocation group header (free/alloc section). 2684 */ 2685 int /* error */ 2686 xfs_read_agf( 2687 struct xfs_mount *mp, /* mount point structure */ 2688 struct xfs_trans *tp, /* transaction pointer */ 2689 xfs_agnumber_t agno, /* allocation group number */ 2690 int flags, /* XFS_BUF_ */ 2691 struct xfs_buf **bpp) /* buffer for the ag freelist header */ 2692 { 2693 int error; 2694 2695 trace_xfs_read_agf(mp, agno); 2696 2697 ASSERT(agno != NULLAGNUMBER); 2698 error = xfs_trans_read_buf( 2699 mp, tp, mp->m_ddev_targp, 2700 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), 2701 XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops); 2702 if (error) 2703 return error; 2704 if (!*bpp) 2705 return 0; 2706 2707 ASSERT(!(*bpp)->b_error); 2708 xfs_buf_set_ref(*bpp, XFS_AGF_REF); 2709 return 0; 2710 } 2711 2712 /* 2713 * Read in the allocation group header (free/alloc section). 2714 */ 2715 int /* error */ 2716 xfs_alloc_read_agf( 2717 struct xfs_mount *mp, /* mount point structure */ 2718 struct xfs_trans *tp, /* transaction pointer */ 2719 xfs_agnumber_t agno, /* allocation group number */ 2720 int flags, /* XFS_ALLOC_FLAG_... */ 2721 struct xfs_buf **bpp) /* buffer for the ag freelist header */ 2722 { 2723 struct xfs_agf *agf; /* ag freelist header */ 2724 struct xfs_perag *pag; /* per allocation group data */ 2725 int error; 2726 2727 trace_xfs_alloc_read_agf(mp, agno); 2728 2729 ASSERT(agno != NULLAGNUMBER); 2730 error = xfs_read_agf(mp, tp, agno, 2731 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? 
XBF_TRYLOCK : 0, 2732 bpp); 2733 if (error) 2734 return error; 2735 if (!*bpp) 2736 return 0; 2737 ASSERT(!(*bpp)->b_error); 2738 2739 agf = XFS_BUF_TO_AGF(*bpp); 2740 pag = xfs_perag_get(mp, agno); 2741 if (!pag->pagf_init) { 2742 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); 2743 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks); 2744 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount); 2745 pag->pagf_longest = be32_to_cpu(agf->agf_longest); 2746 pag->pagf_levels[XFS_BTNUM_BNOi] = 2747 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); 2748 pag->pagf_levels[XFS_BTNUM_CNTi] = 2749 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); 2750 pag->pagf_levels[XFS_BTNUM_RMAPi] = 2751 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]); 2752 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level); 2753 pag->pagf_init = 1; 2754 pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf); 2755 } 2756 #ifdef DEBUG 2757 else if (!XFS_FORCED_SHUTDOWN(mp)) { 2758 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); 2759 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks)); 2760 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); 2761 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest)); 2762 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == 2763 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi])); 2764 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] == 2765 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi])); 2766 } 2767 #endif 2768 xfs_perag_put(pag); 2769 return 0; 2770 } 2771 2772 /* 2773 * Allocate an extent (variable-size). 2774 * Depending on the allocation type, we either look in a single allocation 2775 * group or loop over the allocation groups to find the result. 2776 */ 2777 int /* error */ 2778 xfs_alloc_vextent( 2779 struct xfs_alloc_arg *args) /* allocation argument structure */ 2780 { 2781 xfs_agblock_t agsize; /* allocation group size */ 2782 int error; 2783 int flags; /* XFS_ALLOC_FLAG_... locking flags */ 2784 struct xfs_mount *mp; /* mount structure pointer */ 2785 xfs_agnumber_t sagno; /* starting allocation group number */ 2786 xfs_alloctype_t type; /* input allocation type */ 2787 int bump_rotor = 0; 2788 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */ 2789 2790 mp = args->mp; 2791 type = args->otype = args->type; 2792 args->agbno = NULLAGBLOCK; 2793 /* 2794 * Just fix this up, for the case where the last a.g. is shorter 2795 * (or there's only one a.g.) and the caller couldn't easily figure 2796 * that out (xfs_bmap_alloc). 2797 */ 2798 agsize = mp->m_sb.sb_agblocks; 2799 if (args->maxlen > agsize) 2800 args->maxlen = agsize; 2801 if (args->alignment == 0) 2802 args->alignment = 1; 2803 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount); 2804 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize); 2805 ASSERT(args->minlen <= args->maxlen); 2806 ASSERT(args->minlen <= agsize); 2807 ASSERT(args->mod < args->prod); 2808 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount || 2809 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize || 2810 args->minlen > args->maxlen || args->minlen > agsize || 2811 args->mod >= args->prod) { 2812 args->fsbno = NULLFSBLOCK; 2813 trace_xfs_alloc_vextent_badargs(args); 2814 return 0; 2815 } 2816 2817 switch (type) { 2818 case XFS_ALLOCTYPE_THIS_AG: 2819 case XFS_ALLOCTYPE_NEAR_BNO: 2820 case XFS_ALLOCTYPE_THIS_BNO: 2821 /* 2822 * These three force us into a single a.g. 
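		 * For example (hypothetical geometry): if args->fsbno decodes
		 * to AG 3, block 500, the allocation is attempted only in
		 * AG 3, at or near block 500; no other AG is considered.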
2823 */ 2824 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); 2825 args->pag = xfs_perag_get(mp, args->agno); 2826 error = xfs_alloc_fix_freelist(args, 0); 2827 if (error) { 2828 trace_xfs_alloc_vextent_nofix(args); 2829 goto error0; 2830 } 2831 if (!args->agbp) { 2832 trace_xfs_alloc_vextent_noagbp(args); 2833 break; 2834 } 2835 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); 2836 if ((error = xfs_alloc_ag_vextent(args))) 2837 goto error0; 2838 break; 2839 case XFS_ALLOCTYPE_START_BNO: 2840 /* 2841 * Try near allocation first, then anywhere-in-ag after 2842 * the first a.g. fails. 2843 */ 2844 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) && 2845 (mp->m_flags & XFS_MOUNT_32BITINODES)) { 2846 args->fsbno = XFS_AGB_TO_FSB(mp, 2847 ((mp->m_agfrotor / rotorstep) % 2848 mp->m_sb.sb_agcount), 0); 2849 bump_rotor = 1; 2850 } 2851 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); 2852 args->type = XFS_ALLOCTYPE_NEAR_BNO; 2853 /* FALLTHROUGH */ 2854 case XFS_ALLOCTYPE_FIRST_AG: 2855 /* 2856 * Rotate through the allocation groups looking for a winner. 2857 */ 2858 if (type == XFS_ALLOCTYPE_FIRST_AG) { 2859 /* 2860 * Start with allocation group given by bno. 2861 */ 2862 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); 2863 args->type = XFS_ALLOCTYPE_THIS_AG; 2864 sagno = 0; 2865 flags = 0; 2866 } else { 2867 /* 2868 * Start with the given allocation group. 2869 */ 2870 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno); 2871 flags = XFS_ALLOC_FLAG_TRYLOCK; 2872 } 2873 /* 2874 * Loop over allocation groups twice; first time with 2875 * trylock set, second time without. 2876 */ 2877 for (;;) { 2878 args->pag = xfs_perag_get(mp, args->agno); 2879 error = xfs_alloc_fix_freelist(args, flags); 2880 if (error) { 2881 trace_xfs_alloc_vextent_nofix(args); 2882 goto error0; 2883 } 2884 /* 2885 * If we get a buffer back then the allocation will fly. 2886 */ 2887 if (args->agbp) { 2888 if ((error = xfs_alloc_ag_vextent(args))) 2889 goto error0; 2890 break; 2891 } 2892 2893 trace_xfs_alloc_vextent_loopfailed(args); 2894 2895 /* 2896 * Didn't work, figure out the next iteration. 2897 */ 2898 if (args->agno == sagno && 2899 type == XFS_ALLOCTYPE_START_BNO) 2900 args->type = XFS_ALLOCTYPE_THIS_AG; 2901 /* 2902 * For the first allocation, we can try any AG to get 2903 * space. However, if we already have allocated a 2904 * block, we don't want to try AGs whose number is below 2905 * sagno. Otherwise, we may end up with out-of-order 2906 * locking of AGF, which might cause deadlock. 2907 */ 2908 if (++(args->agno) == mp->m_sb.sb_agcount) { 2909 if (args->tp->t_firstblock != NULLFSBLOCK) 2910 args->agno = sagno; 2911 else 2912 args->agno = 0; 2913 } 2914 /* 2915 * Reached the starting a.g., must either be done 2916 * or switch to non-trylock mode. 
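			 * (The second pass clears XFS_ALLOC_FLAG_TRYLOCK, so
			 * xfs_alloc_fix_freelist() will then block on AGF
			 * locks instead of skipping contended AGs.)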
			 */
			if (args->agno == sagno) {
				if (flags == 0) {
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}

				flags = 0;
				if (type == XFS_ALLOCTYPE_START_BNO) {
					args->agbno = XFS_FSB_TO_AGBNO(mp,
						args->fsbno);
					args->type = XFS_ALLOCTYPE_NEAR_BNO;
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor) {
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif

		/* Zero the extent if we were asked to do so */
		if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(args->ip, args->fsbno, args->len);
			if (error)
				goto error0;
		}

	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}

/* Ensure that the freelist is at full capacity. */
int
xfs_free_extent_fix_freelist(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	struct xfs_buf		**agbp)
{
	struct xfs_alloc_arg	args;
	int			error;

	memset(&args, 0, sizeof(struct xfs_alloc_arg));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.agno = agno;

	/*
	 * Validate that the block number is legal - this enables us to detect
	 * and handle a silent filesystem corruption rather than crashing.
	 */
	if (args.agno >= args.mp->m_sb.sb_agcount)
		return -EFSCORRUPTED;

	args.pag = xfs_perag_get(args.mp, args.agno);
	ASSERT(args.pag);

	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
	if (error)
		goto out;

	*agbp = args.agbp;
out:
	xfs_perag_put(args.pag);
	return error;
}

/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
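 *
 * An illustrative call (values hypothetical), freeing a 16-block extent
 * whose owner information the caller has already set up:
 *
 *	error = __xfs_free_extent(tp, fsbno, 16, &oinfo,
 *			XFS_AG_RESV_NONE, false);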
3013 */ 3014 int 3015 __xfs_free_extent( 3016 struct xfs_trans *tp, 3017 xfs_fsblock_t bno, 3018 xfs_extlen_t len, 3019 const struct xfs_owner_info *oinfo, 3020 enum xfs_ag_resv_type type, 3021 bool skip_discard) 3022 { 3023 struct xfs_mount *mp = tp->t_mountp; 3024 struct xfs_buf *agbp; 3025 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno); 3026 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno); 3027 int error; 3028 unsigned int busy_flags = 0; 3029 3030 ASSERT(len != 0); 3031 ASSERT(type != XFS_AG_RESV_AGFL); 3032 3033 if (XFS_TEST_ERROR(false, mp, 3034 XFS_ERRTAG_FREE_EXTENT)) 3035 return -EIO; 3036 3037 error = xfs_free_extent_fix_freelist(tp, agno, &agbp); 3038 if (error) 3039 return error; 3040 3041 XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err); 3042 3043 /* validate the extent size is legal now we have the agf locked */ 3044 XFS_WANT_CORRUPTED_GOTO(mp, 3045 agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length), 3046 err); 3047 3048 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type); 3049 if (error) 3050 goto err; 3051 3052 if (skip_discard) 3053 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD; 3054 xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags); 3055 return 0; 3056 3057 err: 3058 xfs_trans_brelse(tp, agbp); 3059 return error; 3060 } 3061 3062 struct xfs_alloc_query_range_info { 3063 xfs_alloc_query_range_fn fn; 3064 void *priv; 3065 }; 3066 3067 /* Format btree record and pass to our callback. */ 3068 STATIC int 3069 xfs_alloc_query_range_helper( 3070 struct xfs_btree_cur *cur, 3071 union xfs_btree_rec *rec, 3072 void *priv) 3073 { 3074 struct xfs_alloc_query_range_info *query = priv; 3075 struct xfs_alloc_rec_incore irec; 3076 3077 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock); 3078 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount); 3079 return query->fn(cur, &irec, query->priv); 3080 } 3081 3082 /* Find all free space within a given range of blocks. */ 3083 int 3084 xfs_alloc_query_range( 3085 struct xfs_btree_cur *cur, 3086 struct xfs_alloc_rec_incore *low_rec, 3087 struct xfs_alloc_rec_incore *high_rec, 3088 xfs_alloc_query_range_fn fn, 3089 void *priv) 3090 { 3091 union xfs_btree_irec low_brec; 3092 union xfs_btree_irec high_brec; 3093 struct xfs_alloc_query_range_info query; 3094 3095 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO); 3096 low_brec.a = *low_rec; 3097 high_brec.a = *high_rec; 3098 query.priv = priv; 3099 query.fn = fn; 3100 return xfs_btree_query_range(cur, &low_brec, &high_brec, 3101 xfs_alloc_query_range_helper, &query); 3102 } 3103 3104 /* Find all free space records. */ 3105 int 3106 xfs_alloc_query_all( 3107 struct xfs_btree_cur *cur, 3108 xfs_alloc_query_range_fn fn, 3109 void *priv) 3110 { 3111 struct xfs_alloc_query_range_info query; 3112 3113 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO); 3114 query.priv = priv; 3115 query.fn = fn; 3116 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query); 3117 } 3118 3119 /* Is there a record covering a given extent? */ 3120 int 3121 xfs_alloc_has_record( 3122 struct xfs_btree_cur *cur, 3123 xfs_agblock_t bno, 3124 xfs_extlen_t len, 3125 bool *exists) 3126 { 3127 union xfs_btree_irec low; 3128 union xfs_btree_irec high; 3129 3130 memset(&low, 0, sizeof(low)); 3131 low.a.ar_startblock = bno; 3132 memset(&high, 0xFF, sizeof(high)); 3133 high.a.ar_startblock = bno + len - 1; 3134 3135 return xfs_btree_has_record(cur, &low, &high, exists); 3136 } 3137 3138 /* 3139 * Walk all the blocks in the AGFL. 
The @walk_fn can return any negative 3140 * error code or XFS_BTREE_QUERY_RANGE_ABORT. 3141 */ 3142 int 3143 xfs_agfl_walk( 3144 struct xfs_mount *mp, 3145 struct xfs_agf *agf, 3146 struct xfs_buf *agflbp, 3147 xfs_agfl_walk_fn walk_fn, 3148 void *priv) 3149 { 3150 __be32 *agfl_bno; 3151 unsigned int i; 3152 int error; 3153 3154 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp); 3155 i = be32_to_cpu(agf->agf_flfirst); 3156 3157 /* Nothing to walk in an empty AGFL. */ 3158 if (agf->agf_flcount == cpu_to_be32(0)) 3159 return 0; 3160 3161 /* Otherwise, walk from first to last, wrapping as needed. */ 3162 for (;;) { 3163 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv); 3164 if (error) 3165 return error; 3166 if (i == be32_to_cpu(agf->agf_fllast)) 3167 break; 3168 if (++i == xfs_agfl_size(mp)) 3169 i = 0; 3170 } 3171 3172 return 0; 3173 } 3174
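/*
 * Example xfs_agfl_walk() callback (illustrative only, not used in this
 * file): a trivial walk function that counts the AGFL blocks it visits.
 * A caller would pass a pointer to an unsigned int as @priv:
 *
 *	static int
 *	xfs_agfl_count_helper(
 *		struct xfs_mount	*mp,
 *		xfs_agblock_t		agbno,
 *		void			*priv)
 *	{
 *		unsigned int		*count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 */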