// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_ag.h"

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

/* Free the table used to track the UUIDs of mounted filesystems. */
void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kmem_free(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish UUID in struct super_block */
	uuid_copy(&mp->m_super->s_uuid, uuid);

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = krealloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			GFP_KERNEL | __GFP_NOFAIL);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}

/* Remove this filesystem's UUID from the table of mounted filesystem UUIDs. */
STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}

/* RCU callback that frees a per-AG structure once the grace period expires. */
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_iunlink_destroy(pag);
		xfs_buf_hash_destroy(pag);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return -EFBIG;
	return 0;
}

int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = NULLAGNUMBER;
	xfs_perag_t	*pag;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);

		error = xfs_buf_hash_init(pag);
		if (error)
			goto out_free_pag;
		init_waitqueue_head(&pag->pagb_wait);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;

		error = radix_tree_preload(GFP_NOFS);
		if (error)
			goto out_hash_destroy;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_hash_destroy;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;
		error = xfs_iunlink_init(pag);
		if (error)
			goto out_hash_destroy;
		spin_lock_init(&pag->pag_state_lock);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_hash_destroy:
	xfs_buf_hash_destroy(pag);
out_free_pag:
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_iunlink_destroy(pag);
		kmem_free(pag);
	}
	return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device.  It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock. This will be kept
	 * around at all times to optimize access to the superblock. Therefore,
	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
	 * elevated.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, bp->b_addr);

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * If the sunit/swidth change would move the precomputed root inode value, we
 * must reject the ondisk change because repair will stumble over that.
 * However, we allow the mount to proceed because we never rejected this
 * combination before.  Returns true to update the sb, false otherwise.
 */
static inline int
xfs_check_new_dalign(
	struct xfs_mount	*mp,
	int			new_dalign,
	bool			*update_sb)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	xfs_ino_t		calc_ino;

	calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
	trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);

	if (sbp->sb_rootino == calc_ino) {
		*update_sb = true;
		return 0;
	}

	xfs_warn(mp,
"Cannot change stripe alignment; would require moving root inode.");

	/*
	 * XXX: Next time we add a new incompat feature, this should start
	 * returning -EINVAL to fail the mount.
	 * Until then, spit out a warning that we're ignoring the
	 * administrator's instructions.
	 */
	xfs_warn(mp, "Skipping superblock stripe alignment update.");
	*update_sb = false;
	return 0;
}

/*
 * If we were provided with new sunit/swidth values as mount options, make sure
 * that they pass basic alignment and superblock feature checks, and convert
 * them into the same units (FSB) that everything else expects.  This step
 * /must/ be done before computing the inode geometry.
 */
STATIC int
xfs_validate_new_dalign(
	struct xfs_mount	*mp)
{
	if (mp->m_dalign == 0)
		return 0;

	/*
	 * If stripe unit and stripe width are not multiples
	 * of the fs blocksize turn off alignment.
	 */
	if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
	    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
		xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. blocksize(%d)",
			mp->m_sb.sb_blocksize);
		return -EINVAL;
	} else {
		/*
		 * Convert the stripe unit and width to FSBs.
		 */
		mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
		if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
			xfs_warn(mp,
		"alignment check failed: sunit/swidth vs. agsize(%d)",
				mp->m_sb.sb_agblocks);
			return -EINVAL;
		} else if (mp->m_dalign) {
			mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
		} else {
			xfs_warn(mp,
		"alignment check failed: sunit(%d) less than bsize(%d)",
				mp->m_dalign, mp->m_sb.sb_blocksize);
			return -EINVAL;
		}
	}

	if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
		xfs_warn(mp,
"cannot change alignment: superblock does not support data alignment");
		return -EINVAL;
	}

	return 0;
}

/* Update alignment values based on mount options and sb values. */
STATIC int
xfs_update_alignment(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;

	if (mp->m_dalign) {
		bool		update_sb;
		int		error;

		if (sbp->sb_unit == mp->m_dalign &&
		    sbp->sb_width == mp->m_swidth)
			return 0;

		error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
		if (error || !update_sb)
			return error;

		sbp->sb_unit = mp->m_dalign;
		sbp->sb_width = mp->m_swidth;
		mp->m_update_sb = true;
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc.  These each require a 4
	 * block reservation.  Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(uint64_t, resblks, 8192);
	return resblks;
}

/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount	*mp)
{
	/*
	 * The AG0 superblock verifier rejects in-progress filesystems,
	 * so we should never see the flag set this far into mounting.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, the first phase of recovery has completed and we
	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * If the log was clean when we mounted, we can check the summary
	 * counters.  If any of them are obviously incorrect, we can recompute
	 * them from the AGF headers in the next step.
	 */
	if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

	/*
	 * We can safely re-initialise incore superblock counters from the
	 * per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we waited for recovery to finish before doing
	 * this.
	 *
	 * If the filesystem was cleanly unmounted or the previous check did
	 * not flag anything weird, then we can trust the values in the
	 * superblock to be correct and we don't need to do anything here.
	 * Otherwise, recalculate the summary counters.
	 */
	if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
	    !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
		return 0;

	return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}

/*
 * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
 * internal inode structures can be sitting in the CIL and AIL at this point,
 * so we need to unpin them, write them back and/or reclaim them before unmount
 * can proceed.
 *
 * An inode cluster that has been freed can have its buffer still pinned in
 * memory because the transaction is still sitting in an iclog. The stale
 * inodes on that buffer will be pinned to the buffer until the transaction
 * hits the disk and the callbacks run. Pushing the AIL will skip the stale
 * inodes and may never see the pinned buffer, so nothing will push out the
 * iclog and unpin the buffer.
 *
 * Hence we need to force the log to unpin everything first. However, log
 * forces don't wait for the discards they issue to complete, so we have to
 * explicitly wait for them to complete here as well.
 *
 * Then we can tell the world we are unmounting so that error handling knows
 * that the filesystem is going away and we should error out anything that we
 * have been retrying in the background. This will prevent never-ending
 * retries in AIL pushing from hanging the unmount.
 *
 * Finally, we can push the AIL to clean all the remaining dirty objects, then
 * reclaim the remaining inodes that are still in memory at this point in time.
 */
static void
xfs_unmount_flush_inodes(
	struct xfs_mount	*mp)
{
	xfs_log_force(mp, XFS_LOG_SYNC);
	xfs_extent_busy_wait_all(mp);
	flush_workqueue(xfs_discard_wq);

	mp->m_flags |= XFS_MOUNT_UNMOUNTING;

	xfs_ail_push_all_sync(mp->m_ail);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp);
	xfs_health_unmount(mp);
}

static void
xfs_mount_setup_inode_geom(
	struct xfs_mount	*mp)
{
	struct xfs_ino_geometry *igeo = M_IGEO(mp);

	igeo->attr_fork_offset = xfs_bmap_compute_attr_offset(mp);
	ASSERT(igeo->attr_fork_offset < XFS_LITINO(mp));

	xfs_ialloc_setup_geometry(mp);
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *	  so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &(mp->m_sb);
	struct xfs_inode	*rip;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint64_t		resblks;
	uint			quotamount = 0;
	uint			quotaflags = 0;
	int			error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for mismatched features2 values.
	 * Older kernels read & wrote into the wrong sb offset for sb_features2
	 * on some platforms due to xfs_sb_t not being 64bit size aligned when
	 * sb_features2 was added, which made older superblock reading/writing
	 * routines swap it as a 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the existing
	 * features2 field in case it has already been modified; we don't want
	 * to lose any features.  We then update the bad location with the ORed
	 * value so that older kernels will see any features2 flags. The
	 * superblock writeback code ensures the new sb_features2 is copied to
	 * sb_bad_features2 before it is logged or written to disk.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		mp->m_update_sb = true;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		    !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_sb = true;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_sb = true;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_update_sb = true;
	}

	/*
	 * If we were given new sunit/swidth options, do some basic validation
	 * checks and convert the incore dalign and swidth values to the
	 * same units (FSB) that everything else uses.  This /must/ happen
	 * before computing the inode geometry.
	 */
	error = xfs_validate_new_dalign(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_mount_setup_inode_geom(mp);
	xfs_rmapbt_compute_maxlevels(mp);
	xfs_refcountbt_compute_maxlevels(mp);

	/*
	 * Check if sb_agblocks is aligned at stripe boundary.  If sb_agblocks
	 * is NOT aligned turn off m_dalign since allocator alignment is within
	 * an ag, therefore ag has to be aligned at stripe boundary.  Note that
	 * we must compute the free space and rmap btree geometry before doing
	 * this.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = true;

	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
			       NULL, mp->m_super->s_id);
	if (error)
		goto out;

	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_sysfs;

	error = xfs_error_sysfs_init(mp);
	if (error)
		goto out_del_stats;

	error = xfs_errortag_init(mp);
	if (error)
		goto out_remove_error_sysfs;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_errortag;

	/*
	 * Update the preferred write size based on the information from the
	 * on-disk superblock.
	 */
	mp->m_allocsize_log =
		max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
	mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * If enabled, sparse inode chunk alignment is expected to match the
	 * cluster size. Full inode chunk alignment must match the chunk size,
	 * but that is checked on sb read verification...
	 */
	if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
	    mp->m_sb.sb_spino_align !=
			XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
		xfs_warn(mp,
	"Sparse inode block alignment (%u) must match cluster size (%llu).",
			 mp->m_sb.sb_spino_align,
			 XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
		error = -EINVAL;
		goto out_remove_uuid;
	}

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	mp->m_fixedfsid[0] =
		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);

	error = xfs_da_mount(mp);
	if (error) {
		xfs_warn(mp, "Failed dir/attr init: %d", error);
		goto out_remove_uuid;
	}

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_free_dir;
	}

	if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
		xfs_warn(mp, "no log defined");
		error = -EFSCORRUPTED;
		goto out_free_perag;
	}

	/*
	 * Log's mount-time initialization. The first part of recovery can place
	 * some items on the AIL, to be handled when recovery is finished or
	 * cancelled.
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}

	/* Make sure the summary counts are ok. */
	error = xfs_check_summary_counts(mp);
	if (error)
		goto out_log_dealloc;

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp,
			"Failed to read root inode 0x%llx, error %d",
			sbp->sb_rootino, -error);
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		error = -EFSCORRUPTED;
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				goto out_rtunmount;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be delayed
	 * until after the root and real-time bitmap inodes were consistently
	 * read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Now the log is fully replayed, we can transition to full read-only
	 * mode for read-only mounts.  This will sync all the metadata and clean
	 * the log so that the recovery we just performed does not have to be
	 * replayed again on the next mount.
	 *
	 * We use the same quiesce mechanism as the rw->ro remount, as they are
	 * semantically identical operations.
	 */
	if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
							XFS_MOUNT_RDONLY) {
		xfs_log_clean(mp);
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			goto out_quota;
		}

		/* Reserve AG blocks for future btree expansion. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			goto out_agresv;
	}

	return 0;

 out_agresv:
	xfs_fs_unreserve_ag_blocks(mp);
 out_quota:
	xfs_qm_unmount_quotas(mp);
 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	xfs_irele(rip);
	/* Clean out dquots that might be in memory after quotacheck. */
	xfs_qm_unmount(mp);
	/*
	 * Flush all inode reclamation work and flush the log.
	 * We have to do this /after/ rtunmount and qm_unmount because those
	 * two will have scheduled delayed reclaim for the rt/quota inodes.
	 *
	 * This is slightly different from the unmountfs call sequence
	 * because we could be tearing down a partially set up mount.  In
	 * particular, if log_mount_finish fails we bail out without calling
	 * qm_unmount_quotas and therefore rely on qm_unmount to release the
	 * quota inodes.
	 */
	xfs_unmount_flush_inodes(mp);
 out_log_dealloc:
	xfs_log_mount_cancel(mp);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_buftarg_drain(mp->m_logdev_targp);
	xfs_buftarg_drain(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_free_dir:
	xfs_da_unmount(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out_remove_errortag:
	xfs_errortag_del(mp);
 out_remove_error_sysfs:
	xfs_error_sysfs_del(mp);
 out_del_stats:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
	xfs_sysfs_del(&mp->m_kobj);
 out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	uint64_t		resblks;
	int			error;

	xfs_blockgc_stop(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	xfs_irele(mp->m_rootip);

	xfs_unmount_flush_inodes(mp);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_da_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp);
#endif
	xfs_free_perag(mp);

	xfs_errortag_del(mp);
	xfs_error_sysfs_del(mp);
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
	xfs_sysfs_del(&mp->m_kobj);
}

/*
 * Determine whether modifications can proceed.  The caller specifies the
 * minimum freeze level for which modifications should not be allowed.  This
 * allows certain operations to proceed while the freeze sequence is in
 * progress, if necessary.
 */
bool
xfs_fs_writable(
	struct xfs_mount	*mp,
	int			level)
{
	ASSERT(level > SB_UNFROZEN);
	if ((mp->m_super->s_writers.frozen >= level) ||
	    XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
		return false;

	return true;
}

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (page at a time updates). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024

/*
 * Adjust the in-core free data block count by @delta, dipping into the
 * reserve pool when @rsvd is set and we would otherwise hit ENOSPC.
 */
int
xfs_mod_fdblocks(
	struct xfs_mount	*mp,
	int64_t			delta,
	bool			rsvd)
{
	int64_t			lcounter;
	long long		res_used;
	s32			batch;
	uint64_t		set_aside;

	if (delta > 0) {
		/*
		 * If the reserve pool is depleted, put blocks back into it
		 * first. Most of the time the pool is full.
		 */
		if (likely(mp->m_resblks == mp->m_resblks_avail)) {
			percpu_counter_add(&mp->m_fdblocks, delta);
			return 0;
		}

		spin_lock(&mp->m_sb_lock);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (res_used > delta) {
			mp->m_resblks_avail += delta;
		} else {
			delta -= res_used;
			mp->m_resblks_avail = mp->m_resblks;
			percpu_counter_add(&mp->m_fdblocks, delta);
		}
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}

	/*
	 * Taking blocks away, need to be more accurate the closer we
	 * are to zero.
	 *
	 * If the counter has a value of less than 2 * max batch size,
	 * then make everything serialise as we are real close to
	 * ENOSPC.
	 */
	if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
				     XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;
	else
		batch = XFS_FDBLOCKS_BATCH;

	/*
	 * Set aside allocbt blocks because these blocks are tracked as free
	 * space but not available for allocation. Technically this means that a
	 * single reservation cannot consume all remaining free space, but the
	 * ratio of allocbt blocks to usable free blocks should be rather small.
	 * The tradeoff without this is that filesystems that maintain high
	 * perag block reservations can over reserve physical block availability
	 * and fail physical allocation, which leads to much more serious
	 * problems (i.e. transaction abort, pagecache discards, etc.) than
	 * slightly premature -ENOSPC.
	 */
	set_aside = mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
	percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
	if (__percpu_counter_compare(&mp->m_fdblocks, set_aside,
				     XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
		return 0;
	}

	/*
	 * lock up the sb for dipping into reserves before releasing the space
	 * that took us to ENOSPC.
	 */
	spin_lock(&mp->m_sb_lock);
	percpu_counter_add(&mp->m_fdblocks, -delta);
	if (!rsvd)
		goto fdblocks_enospc;

	lcounter = (long long)mp->m_resblks_avail + delta;
	if (lcounter >= 0) {
		mp->m_resblks_avail = lcounter;
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}
	xfs_warn_once(mp,
"Reserve blocks depleted! Consider increasing reserve pool size.");

fdblocks_enospc:
	spin_unlock(&mp->m_sb_lock);
	return -ENOSPC;
}

/* Adjust the free realtime extent count in the in-core superblock. */
int
xfs_mod_frextents(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	int64_t			lcounter;
	int			ret = 0;

	spin_lock(&mp->m_sb_lock);
	lcounter = mp->m_sb.sb_frextents + delta;
	if (lcounter < 0)
		ret = -ENOSPC;
	else
		mp->m_sb.sb_frextents = lcounter;
	spin_unlock(&mp->m_sb_lock);
	return ret;
}

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return -EROFS;
	}
	return 0;
}

/* Force the summary counters to be recalculated at next mount. */
void
xfs_force_summary_recalc(
	struct xfs_mount	*mp)
{
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return;

	xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
}

/*
 * Update the in-core delayed block counter.
 *
 * We prefer to update the counter without having to take a spinlock for every
 * counter update (i.e. batching).  Each change to the delayed allocation
 * reservations can easily exceed the default percpu counter batch size, so we
 * use a larger batch factor here.
 *
 * Note that we don't currently have any callers requiring fast summation
 * (e.g. percpu_counter_read) so we can use a big batch value here.
 */
#define XFS_DELALLOC_BATCH	(4096)
void
xfs_mod_delalloc(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
			XFS_DELALLOC_BATCH);
}