/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"


/*
 * Passive reference counting access wrappers to the perag structures. If the
 * per-ag structure is to be freed, the freeing code is responsible for
 * cleaning up objects with passive references before freeing the structure,
 * such as cached buffers.
 */
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = xa_load(&mp->m_perags, agno);
	if (pag) {
		trace_xfs_perag_get(pag, _RET_IP_);
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		atomic_inc(&pag->pag_ref);
	}
	rcu_read_unlock();
	return pag;
}

/* Get a passive reference to the given perag. */
struct xfs_perag *
xfs_perag_hold(
	struct xfs_perag	*pag)
{
	ASSERT(atomic_read(&pag->pag_ref) > 0 ||
	       atomic_read(&pag->pag_active_ref) > 0);

	trace_xfs_perag_hold(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	return pag;
}

void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_put(pag, _RET_IP_);
	ASSERT(atomic_read(&pag->pag_ref) > 0);
	atomic_dec(&pag->pag_ref);
}

/*
 * Active references for perag structures. This is for short-term access to
 * the per-ag structures for walking trees or accessing state. If an AG is
 * being shrunk or is offline, then this will fail to find that AG and return
 * NULL instead.
 */
struct xfs_perag *
xfs_perag_grab(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = xa_load(&mp->m_perags, agno);
	if (pag) {
		trace_xfs_perag_grab(pag, _RET_IP_);
		if (!atomic_inc_not_zero(&pag->pag_active_ref))
			pag = NULL;
	}
	rcu_read_unlock();
	return pag;
}

void
xfs_perag_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_rele(pag, _RET_IP_);
	if (atomic_dec_and_test(&pag->pag_active_ref))
		wake_up(&pag->pag_active_wq);
}
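/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a typical AG walk takes an active reference per AG so that AGs which are
 * offline or being shrunk are skipped. The helper name is hypothetical and
 * the body is elided where marked.
 */
#if 0
static void
xfs_example_walk_all_ags(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xfs_perag_grab(mp, agno);
		if (!pag)
			continue;	/* AG offline or being shrunk */
		/* ... walk trees or sample per-ag state here ... */
		xfs_perag_rele(pag);
	}
}
#endif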
/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
		 */
		pag = xfs_perag_get(mp, index);
		error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, 0, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}

		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
		xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
		error = -EFSCORRUPTED;
		goto out;
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = fdblocks;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	return error;
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
void
xfs_free_perag(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		pag = xa_erase(&mp->m_perags, agno);
		ASSERT(pag);
		XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
		xfs_defer_drain_free(&pag->pag_intents_drain);

		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_buf_cache_destroy(&pag->pag_bcache);

		/* drop the mount's active reference */
		xfs_perag_rele(pag);
		XFS_IS_CORRUPT(pag->pag_mount,
				atomic_read(&pag->pag_active_ref) != 0);
		kfree_rcu_mightsleep(pag);
	}
}

/* Find the size of the AG, in blocks. */
static xfs_agblock_t
__xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks)
{
	ASSERT(agno < agcount);

	if (agno < agcount - 1)
		return mp->m_sb.sb_agblocks;
	return dblocks - (agno * mp->m_sb.sb_agblocks);
}

xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
			mp->m_sb.sb_dblocks);
}
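/*
 * Worked example (editor's addition, illustrative numbers only): with
 * sb_agblocks = 1000 and sb_dblocks = 3500 across agcount = 4, AGs 0-2 are
 * full sized (1000 blocks each) and the runt AG 3 gets
 * 3500 - 3 * 1000 = 500 blocks.
 */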
/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agblock_t		eoag,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	xfs_agblock_t		bno;

	/*
	 * Calculate the first inode, which will be in the first
	 * cluster-aligned block after the AGFL.
	 */
	bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
	*first = XFS_AGB_TO_AGINO(mp, bno);

	/*
	 * Calculate the last inode, which will be at the end of the
	 * last (aligned) cluster that can be allocated in the AG.
	 */
	bno = round_down(eoag, M_IGEO(mp)->cluster_align);
	*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}

void
xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}
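/*
 * Worked example (editor's addition, illustrative numbers only): assume
 * XFS_AGFL_BLOCK() = 3, cluster_align = 4 blocks and 16 inodes per block.
 * The first inode lands in block round_up(3 + 1, 4) = 4, so
 * *first = 4 * 16 = 64; for eoag = 500 the last aligned cluster ends at
 * round_down(500, 4) = 500, so *last = 500 * 16 - 1 = 7999.
 */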
/*
 * Free the perag structures within the specified AG range. This is only used
 * to free unused perags on the error handling path.
 */
void
xfs_free_unused_perag_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agstart,
	xfs_agnumber_t		agend)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;

	for (index = agstart; index < agend; index++) {
		pag = xa_erase(&mp->m_perags, index);
		if (!pag)
			break;
		xfs_buf_cache_destroy(&pag->pag_bcache);
		xfs_defer_drain_free(&pag->pag_intents_drain);
		kfree(pag);
	}
}

int
xfs_initialize_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks,
	xfs_agnumber_t		*maxagi)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;
	xfs_agnumber_t		first_initialised = NULLAGNUMBER;
	int			error;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kzalloc(sizeof(*pag), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;

		error = xa_insert(&mp->m_perags, index, pag, GFP_KERNEL);
		if (error) {
			WARN_ON_ONCE(error == -EBUSY);
			goto out_free_pag;
		}

#ifdef __KERNEL__
		/* Place kernel structure only init below this point. */
		spin_lock_init(&pag->pag_ici_lock);
		spin_lock_init(&pag->pagb_lock);
		spin_lock_init(&pag->pag_state_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		xfs_defer_drain_init(&pag->pag_intents_drain);
		init_waitqueue_head(&pag->pagb_wait);
		init_waitqueue_head(&pag->pag_active_wq);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		xfs_hooks_init(&pag->pag_rmap_update_hooks);
#endif /* __KERNEL__ */

		error = xfs_buf_cache_init(&pag->pag_bcache);
		if (error)
			goto out_remove_pag;

		/* Active ref owned by mount indicates AG is online. */
		atomic_set(&pag->pag_active_ref, 1);

		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;

		/*
		 * Pre-calculated geometry
		 */
		pag->block_count = __xfs_ag_block_count(mp, index, agcount,
				dblocks);
		pag->min_block = XFS_AGFL_BLOCK(mp);
		__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_remove_pag:
	xfs_defer_drain_free(&pag->pag_intents_drain);
	pag = xa_erase(&mp->m_perags, index);
out_free_pag:
	kfree(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	xfs_free_unused_perag_range(mp, first_initialised, agcount);
	return error;
}

static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
}
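/*
 * Editor's sketch of the record layout produced by xfs_freesp_init_recs()
 * below, using illustrative numbers. For an AG with agsize = 1000,
 * m_ag_prealloc_blocks = 16, and a stripe-aligned internal log of 100 blocks
 * starting at agbno 32, the new bnobt/cntbt root carries two records:
 *
 *	record 1: startblock 16,  blockcount 16  (pad up to the log)
 *	record 2: startblock 132, blockcount 868 (after the log to EOAG)
 *
 * Without an internal log in the AG there is a single record covering
 * [16, 1000).
 */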
/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (xfs_ag_contains_log(mp, id->agno)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log and
			 * bump the record count.
			 */
			arec->ar_blockcount = cpu_to_be32(start -
						mp->m_ag_prealloc_blocks);
			be16_add_cpu(&block->bb_numrecs, 1);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the block count of this record; if it is nonzero,
	 * increment the record count.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (arec->ar_blockcount)
		be16_add_cpu(&block->bb_numrecs, 1);
}

/*
 * bnobt/cntbt btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 4, id->agno);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_has_reflink(mp)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (xfs_ag_contains_log(mp, id->agno)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}
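/*
 * Editor's summary of the static rmap records created above (extents are
 * half-open [start, end) ranges in AG blocks; counts follow from the code):
 *
 *	[0, XFS_BNO_BLOCK)		OWN_FS		AG headers and AGFL
 *	[XFS_BNO_BLOCK, +2)		OWN_AG		bnobt/cntbt roots
 *	[XFS_IBT_BLOCK, XFS_RMAP_BLOCK)	OWN_INOBT	inobt (+finobt) roots
 *	[XFS_RMAP_BLOCK, +1)		OWN_AG		rmapbt root
 *
 * plus optional records for the refcount btree root and the internal log.
 */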
/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_bno_root = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_cnt_root = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_bno_level = cpu_to_be32(1);
	agf->agf_cnt_level = cpu_to_be32(1);
	if (xfs_has_rmapbt(mp)) {
		agf->agf_rmap_root = cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_rmap_level = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (xfs_ag_contains_log(mp, id->agno)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_has_crc(mp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_finobt(mp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_has_inobtcounts(mp)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_has_finobt(mp))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}
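/*
 * Editor's sketch of the caller-side pattern for the header initialisation
 * machinery below: the grow path fills out an aghdr_init_data, lets
 * xfs_ag_init_headers() queue all the new headers on the delwri list, then
 * submits the batch. Loosely modelled on the growfs code; treat it as an
 * illustrative assumption, not a verbatim excerpt.
 */
#if 0
	struct aghdr_init_data	id = {
		.agno		= agno,
		.agsize		= agsize,
	};
	int			error;

	INIT_LIST_HEAD(&id.buffer_list);
	error = xfs_ag_init_headers(mp, &id);
	if (!error)
		error = xfs_buf_delwri_submit(&id.buffer_list);
#endif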
typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);

static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	const struct xfs_btree_ops *bc_ops;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to a
 * delayed write buffer list supplied by the caller so they can submit them to
 * disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.bc_ops = &xfs_bnobt_ops,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_bnoroot_init,
		.bc_ops = &xfs_cntbt_ops,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.bc_ops = &xfs_inobt_ops,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.bc_ops = &xfs_finobt_ops,
		.need_init = xfs_has_finobt(mp)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.bc_ops = &xfs_rmapbt_ops,
		.need_init = xfs_has_rmapbt(mp)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.bc_ops = &xfs_refcountbt_ops,
		.need_init = xfs_has_reflink(mp)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->bc_ops = dp->bc_ops;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}

int
xfs_ag_shrink_space(
	struct xfs_perag	*pag,
	struct xfs_trans	**tpp,
	xfs_extlen_t		delta)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_alloc_arg	args = {
		.tp	= *tpp,
		.mp	= mp,
		.pag	= pag,
		.minlen	= delta,
		.maxlen	= delta,
		.oinfo	= XFS_RMAP_OINFO_SKIP_UPDATE,
		.resv	= XFS_AG_RESV_NONE,
		.prod	= 1
	};
	struct xfs_buf		*agibp, *agfbp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	xfs_agblock_t		aglen;
	int			error, err2;

	ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(pag, *tpp, 0, &agibp);
	if (error)
		return error;

	agi = agibp->b_addr;

	error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);
	/* some extra paranoid checks before we shrink the ag */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length)) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF);
		return -EFSCORRUPTED;
	}
	if (delta >= aglen)
		return -EINVAL;

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
	if (error)
		return error;

	/*
	 * Disable perag reservations so they don't cause the allocation
	 * request to fail. We'll reestablish the reservation before we return.
	 */
	xfs_ag_resv_free(pag);

	/* internal log shouldn't also show up in the free space btrees */
	error = xfs_alloc_vextent_exact_bno(&args,
			XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta));
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;

	if (error) {
		/*
		 * If extent allocation fails, need to roll the transaction to
		 * ensure that the AGFL fixup has been committed anyway.
		 *
		 * We need to hold the AGF across the roll to ensure nothing can
		 * access the AG for allocation until the shrink is fully
		 * cleaned up. And due to the resetting of the AG block
		 * reservation space needing to lock the AGI, we also have to
		 * hold that so we don't get AGI/AGF lock order inversions in
		 * the error handling path.
		 */
		xfs_trans_bhold(*tpp, agfbp);
		xfs_trans_bhold(*tpp, agibp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
		xfs_trans_bjoin(*tpp, agibp);
		goto resv_init_out;
	}
	/*
	 * If successfully deleted from the freespace btrees, confirm that the
	 * per-AG reservation still works as expected.
	 */
	be32_add_cpu(&agi->agi_length, -delta);
	be32_add_cpu(&agf->agf_length, -delta);

	err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		err2 = xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
				XFS_AG_RESV_NONE, XFS_FREE_EXTENT_SKIP_DISCARD);
		if (err2)
			goto resv_err;

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

		error = -ENOSPC;
		goto resv_init_out;
	}

	/* Update perag geometry */
	pag->block_count -= delta;
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);

	xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
	xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
	return 0;

resv_init_out:
	err2 = xfs_ag_resv_init(pag, *tpp);
	if (!err2)
		return error;
resv_err:
	xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return err2;
}

/*
 * Extend the AG indicated by @pag by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);

	error = xfs_ialloc_read_agi(pag, tp, 0, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(pag, tp, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len,
			len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE);
	if (error)
		return error;

	/* Update perag geometry */
	pag->block_count = be32_to_cpu(agf->agf_length);
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	return 0;
}
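/*
 * Editor's sketch of how a grow-style caller might drive
 * xfs_ag_extend_space(); loosely modelled on xfs_growfs_data_private() and
 * heavily simplified, so treat the helper name, reservation choice and error
 * handling as illustrative assumptions only.
 */
#if 0
static int
xfs_example_grow_last_ag(
	struct xfs_mount	*mp,
	xfs_extlen_t		delta)
{
	struct xfs_perag	*pag;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, 0, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	pag = xfs_perag_grab(mp, mp->m_sb.sb_agcount - 1);
	if (!pag) {
		xfs_trans_cancel(tp);
		return -EFSCORRUPTED;
	}
	error = xfs_ag_extend_space(pag, tp, delta);
	xfs_perag_rele(pag);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}
	return xfs_trans_commit(tp);
}
#endif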
/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_perag	*pag,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	unsigned int		freeblks;
	int			error;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(pag, NULL, 0, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
	if (error)
		goto out_agi;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = pag->pag_agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}
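/*
 * Editor's sketch of fetching and consuming AG geometry, roughly following
 * the XFS_IOC_AG_GEOMETRY ioctl path. The helper is hypothetical and shown
 * only to illustrate the get/put pairing around xfs_ag_get_geometry().
 */
#if 0
static int
xfs_example_report_ag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_ag_geometry	ageo;
	struct xfs_perag	*pag;
	int			error;

	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -EINVAL;
	error = xfs_ag_get_geometry(pag, &ageo);
	xfs_perag_put(pag);
	if (error)
		return error;

	xfs_notice(mp, "AG %u: %u blocks, %u free", ageo.ag_number,
			ageo.ag_length, ageo.ag_freeblks);
	return 0;
}
#endif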