// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"
#include "xfs_ag.h"
#include "xfs_rtbitmap.h"
#include "xfs_metafile.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_rtrefcount_btree.h"
#include "scrub/stats.h"
#include "xfs_zone_alloc.h"
#include "xfs_healthmon.h"

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kfree(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish UUID in struct super_block */
	super_set_uuid(mp->m_super, uuid->b, sizeof(*uuid));

	if (xfs_has_nouuid(mp))
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = krealloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			GFP_KERNEL | __GFP_NOFAIL);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (xfs_has_nouuid(mp))
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}
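/*
 * Note that the UUID table never shrinks while the module is loaded:
 * xfs_uuid_unmount() merely zeroes the departing entry, leaving a hole that
 * a subsequent xfs_uuid_mount() can reuse, and the table itself is only
 * freed by xfs_uuid_table_free() at module teardown.
 */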
/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	uint64_t	nblocks)
{
	uint64_t	max_bytes;

	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	if (check_shl_overflow(nblocks, sbp->sb_blocklog, &max_bytes))
		return -EFBIG;

	/*
	 * Limited by ULONG_MAX of page cache index (e.g. 16TiB with 4k pages
	 * on a 32-bit kernel).
	 */
	if (max_bytes >> PAGE_SHIFT > ULONG_MAX)
		return -EFBIG;
	return 0;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * In the first pass, use the device sector size to just read enough
	 * of the superblock to extract the XFS sector size.
	 *
	 * The device sector size must be smaller than or equal to the XFS
	 * sector size and thus we can always read the superblock. Once we know
	 * the XFS sector size, re-read it and run the buffer verifier.
	 */
	sector_size = mp->m_ddev_targp->bt_logical_sectorsize;
	buf_ops = NULL;

reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), &bp, buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, bp->b_addr);

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	mp->m_features |= xfs_sb_version_to_features(sbp);
	xfs_reinit_percpu_counters(mp);

	/*
	 * If logged xattrs are enabled after log recovery finishes, then set
	 * the opstate so that log recovery will work properly.
	 */
	if (xfs_sb_version_haslogxattrs(&mp->m_sb))
		xfs_set_using_logged_xattrs(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	/*
	 * Keep a pointer of the sb buffer around instead of caching it in the
	 * buffer cache because we access it frequently.
	 */
	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}
/*
 * If the sunit/swidth change would move the precomputed root inode value, we
 * must reject the ondisk change because repair will stumble over that.
 * However, we allow the mount to proceed because we never rejected this
 * combination before. Sets *update_sb to true if the superblock should be
 * updated, false otherwise.
 */
static inline int
xfs_check_new_dalign(
	struct xfs_mount	*mp,
	int			new_dalign,
	bool			*update_sb)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	xfs_ino_t		calc_ino;

	calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
	trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);

	if (sbp->sb_rootino == calc_ino) {
		*update_sb = true;
		return 0;
	}

	xfs_warn(mp,
"Cannot change stripe alignment; would require moving root inode.");

	/*
	 * XXX: Next time we add a new incompat feature, this should start
	 * returning -EINVAL to fail the mount. Until then, spit out a warning
	 * that we're ignoring the administrator's instructions.
	 */
	xfs_warn(mp, "Skipping superblock stripe alignment update.");
	*update_sb = false;
	return 0;
}

/*
 * If we were provided with new sunit/swidth values as mount options, make sure
 * that they pass basic alignment and superblock feature checks, and convert
 * them into the same units (FSB) that everything else expects. This step
 * /must/ be done before computing the inode geometry.
 */
STATIC int
xfs_validate_new_dalign(
	struct xfs_mount	*mp)
{
	if (mp->m_dalign == 0)
		return 0;

	/*
	 * If stripe unit and stripe width are not multiples
	 * of the fs blocksize, reject the mount.
	 */
	if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
	    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
		xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. blocksize(%d)",
			mp->m_sb.sb_blocksize);
		return -EINVAL;
	}

	/*
	 * Convert the stripe unit and width to FSBs.
	 */
	mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
	if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
		xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. agsize(%d)",
			mp->m_sb.sb_agblocks);
		return -EINVAL;
	}

	if (!mp->m_dalign) {
		xfs_warn(mp,
	"alignment check failed: sunit(%d) less than bsize(%d)",
			mp->m_dalign, mp->m_sb.sb_blocksize);
		return -EINVAL;
	}

	mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);

	if (!xfs_has_dalign(mp)) {
		xfs_warn(mp,
"cannot change alignment: superblock does not support data alignment");
		return -EINVAL;
	}

	return 0;
}

/* Update alignment values based on mount options and sb values. */
STATIC int
xfs_update_alignment(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;

	if (mp->m_dalign) {
		bool		update_sb;
		int		error;

		if (sbp->sb_unit == mp->m_dalign &&
		    sbp->sb_width == mp->m_swidth)
			return 0;

		error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
		if (error || !update_sb)
			return error;

		sbp->sb_unit = mp->m_dalign;
		sbp->sb_width = mp->m_swidth;
		mp->m_update_sb = true;
	} else if (!xfs_has_noalign(mp) && xfs_has_dalign(mp)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}
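/*
 * Worked example for the two helpers above (illustrative numbers): on a
 * 4k-block filesystem, mount options sunit=128,swidth=256 arrive in 512-byte
 * basic blocks. BBTOB() yields 64k and 128k, both multiples of the block
 * size, so xfs_validate_new_dalign() converts them with XFS_BB_TO_FSBT() to
 * 16 and 32 FSBs; xfs_update_alignment() then copies them into
 * sb_unit/sb_width if they differ from the ondisk values.
 */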
/*
 * Precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	uint64_t		dblocks = mp->m_sb.sb_dblocks;
	uint64_t		rtexts = mp->m_sb.sb_rextents;
	int			i;

	do_div(dblocks, 100);
	do_div(rtexts, 100);

	/* Threshold slot i holds (i + 1) percent of the device. */
	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		mp->m_low_space[i] = dblocks * (i + 1);
		mp->m_low_rtexts[i] = rtexts * (i + 1);
	}
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;
	xfs_daddr_t		d;
	int			error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}

	/* Read the last sector to prove the device is at least that big. */
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

static const char *const xfs_free_pool_name[] = {
	[XC_FREE_BLOCKS]	= "free blocks",
	[XC_FREE_RTEXTENTS]	= "free rt extents",
	[XC_FREE_RTAVAILABLE]	= "available rt extents",
};

uint64_t
xfs_default_resblks(
	struct xfs_mount	*mp,
	enum xfs_free_counter	ctr)
{
	switch (ctr) {
	case XC_FREE_BLOCKS:
		/*
		 * Default to 5% or 8192 FSBs of space reserved, whichever is
		 * smaller.
		 *
		 * This is intended to cover concurrent allocation transactions
		 * when we initially hit ENOSPC. These each require a 4 block
		 * reservation. Hence by default we cover roughly 2000
		 * concurrent allocation reservations.
		 */
		return min(div_u64(mp->m_sb.sb_dblocks, 20), 8192ULL);
	case XC_FREE_RTEXTENTS:
	case XC_FREE_RTAVAILABLE:
		if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(mp))
			return xfs_zoned_default_resblks(mp, ctr);
		return 0;
	default:
		ASSERT(0);
		return 0;
	}
}
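/*
 * Worked example (illustrative numbers): a 100GiB filesystem with 4k blocks
 * has sb_dblocks == 26214400, so 5% is 1310720 FSBs and the XC_FREE_BLOCKS
 * default is capped at the smaller value of 8192 FSBs (32MiB), enough for
 * roughly 2000 concurrent 4-block allocation reservations.
 */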
/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount	*mp)
{
	int			error = 0;

	/*
	 * The AG0 superblock verifier rejects in-progress filesystems,
	 * so we should never see the flag set this far into mounting.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, then the first phase of recovery has completed and
	 * we have consistent AG blocks on disk. We have not recovered EFIs
	 * yet, but they are recovered transactionally in the second recovery
	 * phase later.
	 *
	 * If the log was clean when we mounted, we can check the summary
	 * counters. If any of them are obviously incorrect, we can recompute
	 * them from the AGF headers in the next step.
	 */
	if (xfs_is_clean(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

	/*
	 * We can safely re-initialise incore superblock counters from the
	 * per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we waited for recovery to finish before doing
	 * this.
	 *
	 * If the filesystem was cleanly unmounted or the previous check did
	 * not flag anything weird, then we can trust the values in the
	 * superblock to be correct and we don't need to do anything here.
	 * Otherwise, recalculate the summary counters.
	 */
	if ((xfs_has_lazysbcount(mp) && !xfs_is_clean(mp)) ||
	    xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS)) {
		error = xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
		if (error)
			return error;
	}

	/*
	 * Older kernels misused sb_frextents to reflect both incore
	 * reservations made by running transactions and the actual count of
	 * free rt extents in the ondisk metadata. Transactions committed
	 * during runtime can therefore contain a superblock update that
	 * undercounts the number of free rt extents tracked in the rt bitmap.
	 * A clean unmount record will have the correct frextents value since
	 * there can be no other transactions running at that point.
	 *
	 * If we're mounting the rt volume after recovering the log, recompute
	 * frextents from the rtbitmap file to fix the inconsistency.
	 */
	if (xfs_has_realtime(mp) && !xfs_has_zoned(mp) && !xfs_is_clean(mp)) {
		error = xfs_rtalloc_reinit_frextents(mp);
		if (error)
			return error;
	}

	return 0;
}

static void
xfs_unmount_check(
	struct xfs_mount	*mp)
{
	if (xfs_is_shutdown(mp))
		return;

	if (percpu_counter_sum(&mp->m_ifree) >
			percpu_counter_sum(&mp->m_icount)) {
		xfs_alert(mp, "ifree/icount mismatch at unmount");
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
	}
}
/*
 * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
 * internal inode structures can be sitting in the CIL and AIL at this point,
 * so we need to unpin them, write them back and/or reclaim them before unmount
 * can proceed. In other words, callers are required to have inactivated all
 * inodes.
 *
 * An inode cluster that has been freed can have its buffer still pinned in
 * memory because the transaction is still sitting in an iclog. The stale
 * inodes on that buffer will be pinned to the buffer until the transaction
 * hits the disk and the callbacks run. Pushing the AIL will skip the stale
 * inodes and may never see the pinned buffer, so nothing will push out the
 * iclog and unpin the buffer.
 *
 * Hence we need to force the log to unpin everything first. However, log
 * forces don't wait for the discards they issue to complete, so we have to
 * explicitly wait for them to complete here as well.
 *
 * Then we can tell the world we are unmounting so that error handling knows
 * that the filesystem is going away and we should error out anything that we
 * have been retrying in the background. This will prevent never-ending
 * retries in AIL pushing from hanging the unmount.
 *
 * Finally, we can push the AIL to clean all the remaining dirty objects, then
 * reclaim the remaining inodes that are still in memory at this point in time.
 */
static void
xfs_unmount_flush_inodes(
	struct xfs_mount	*mp)
{
	xfs_log_force(mp, XFS_LOG_SYNC);
	xfs_extent_busy_wait_all(mp);
	flush_workqueue(xfs_discard_wq);

	xfs_set_unmounting(mp);

	xfs_ail_push_all_sync(mp->m_ail);
	xfs_inodegc_stop(mp);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp);
	xfs_health_unmount(mp);
	xfs_healthmon_unmount(mp);
}

static void
xfs_mount_setup_inode_geom(
	struct xfs_mount	*mp)
{
	struct xfs_ino_geometry *igeo = M_IGEO(mp);

	igeo->attr_fork_offset = xfs_bmap_compute_attr_offset(mp);
	ASSERT(igeo->attr_fork_offset < XFS_LITINO(mp));

	xfs_ialloc_setup_geometry(mp);
}

/* Mount the metadata directory tree root. */
STATIC int
xfs_mount_setup_metadir(
	struct xfs_mount	*mp)
{
	int			error;

	/* Load the metadata directory root inode into memory. */
	error = xfs_metafile_iget(mp, mp->m_sb.sb_metadirino, XFS_METAFILE_DIR,
			&mp->m_metadirip);
	if (error)
		xfs_warn(mp, "Failed to load metadir root directory, error %d",
				error);
	return error;
}

/* Compute maximum possible height for per-AG btree types for this fs. */
static inline void
xfs_agbtree_compute_maxlevels(
	struct xfs_mount	*mp)
{
	unsigned int		levels;

	levels = max(mp->m_alloc_maxlevels, M_IGEO(mp)->inobt_maxlevels);
	levels = max(levels, mp->m_rmap_maxlevels);
	mp->m_agbtree_maxlevels = max(levels, mp->m_refc_maxlevels);
}

/* Maximum atomic write IO size that the kernel allows. */
static inline xfs_extlen_t xfs_calc_atomic_write_max(struct xfs_mount *mp)
{
	return rounddown_pow_of_two(XFS_B_TO_FSB(mp, MAX_RW_COUNT));
}

/*
 * If the underlying device advertises atomic write support, limit the size of
 * atomic writes to the greatest power-of-two factor of the group size so
 * that every atomic write unit aligns with the start of every group. This is
 * required so that the allocations for an atomic write will always be
 * aligned compatibly with the alignment requirements of the storage.
 *
 * If the device doesn't advertise atomic writes, then there are no alignment
 * restrictions and the largest out-of-place write we can do ourselves is the
 * number of blocks that user files can allocate from any group.
 */
static xfs_extlen_t
xfs_calc_group_awu_max(
	struct xfs_mount	*mp,
	enum xfs_group_type	type)
{
	struct xfs_groups	*g = &mp->m_groups[type];
	struct xfs_buftarg	*btp = xfs_group_type_buftarg(mp, type);

	if (g->blocks == 0)
		return 0;
	if (btp && btp->bt_awu_min > 0)
		return max_pow_of_two_factor(g->blocks);
	return rounddown_pow_of_two(g->blocks);
}
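/*
 * Worked example (illustrative, assuming 4k pages and 4k blocks):
 * MAX_RW_COUNT is INT_MAX rounded down to a page boundary (0x7ffff000 bytes),
 * which XFS_B_TO_FSB() turns into 524287 FSBs and xfs_calc_atomic_write_max()
 * rounds down to 2^18 FSBs, i.e. 1GiB. The effective awu_max computed below
 * is further clamped by the group size and the reflink CoW ioend limit.
 */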
/* Compute the maximum atomic write unit size for each group type. */
static inline void
xfs_calc_atomic_write_unit_max(
	struct xfs_mount	*mp,
	enum xfs_group_type	type)
{
	struct xfs_groups	*g = &mp->m_groups[type];

	const xfs_extlen_t	max_write = xfs_calc_atomic_write_max(mp);
	const xfs_extlen_t	max_ioend = xfs_reflink_max_atomic_cow(mp);
	const xfs_extlen_t	max_gsize = xfs_calc_group_awu_max(mp, type);

	g->awu_max = min3(max_write, max_ioend, max_gsize);
	trace_xfs_calc_atomic_write_unit_max(mp, type, max_write, max_ioend,
			max_gsize, g->awu_max);
}

/*
 * Try to set the atomic write maximum to a new value that we got from
 * userspace via mount option.
 */
int
xfs_set_max_atomic_write_opt(
	struct xfs_mount	*mp,
	unsigned long long	new_max_bytes)
{
	const xfs_filblks_t	new_max_fsbs = XFS_B_TO_FSBT(mp, new_max_bytes);
	const xfs_extlen_t	max_write = xfs_calc_atomic_write_max(mp);
	const xfs_extlen_t	max_group =
		max(mp->m_groups[XG_TYPE_AG].blocks,
		    mp->m_groups[XG_TYPE_RTG].blocks);
	const xfs_extlen_t	max_group_write =
		max(xfs_calc_group_awu_max(mp, XG_TYPE_AG),
		    xfs_calc_group_awu_max(mp, XG_TYPE_RTG));
	int			error;

	if (new_max_bytes == 0)
		goto set_limit;

	ASSERT(max_write <= U32_MAX);

	/* generic_atomic_write_valid enforces power of two length */
	if (!is_power_of_2(new_max_bytes)) {
		xfs_warn(mp,
"max atomic write size of %llu bytes is not a power of 2",
			new_max_bytes);
		return -EINVAL;
	}

	if (new_max_bytes & mp->m_blockmask) {
		xfs_warn(mp,
"max atomic write size of %llu bytes not aligned with fsblock",
			new_max_bytes);
		return -EINVAL;
	}

	if (new_max_fsbs > max_write) {
		xfs_warn(mp,
"max atomic write size of %lluk cannot be larger than max write size %lluk",
			new_max_bytes >> 10,
			XFS_FSB_TO_B(mp, max_write) >> 10);
		return -EINVAL;
	}

	if (new_max_fsbs > max_group) {
		xfs_warn(mp,
"max atomic write size of %lluk cannot be larger than allocation group size %lluk",
			new_max_bytes >> 10,
			XFS_FSB_TO_B(mp, max_group) >> 10);
		return -EINVAL;
	}

	if (new_max_fsbs > max_group_write) {
		xfs_warn(mp,
"max atomic write size of %lluk cannot be larger than max allocation group write size %lluk",
			new_max_bytes >> 10,
			XFS_FSB_TO_B(mp, max_group_write) >> 10);
		return -EINVAL;
	}

	if (xfs_has_reflink(mp))
		goto set_limit;

	if (new_max_fsbs == 1) {
		if (mp->m_ddev_targp->bt_awu_max ||
		    (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_awu_max)) {
			/*
			 * A single-fsblock untorn write can be satisfied by
			 * the hardware alone, so reflink is not required.
			 */
		} else {
			xfs_warn(mp,
"cannot support atomic writes of size %lluk with no reflink or HW support",
				new_max_bytes >> 10);
			return -EINVAL;
		}
	} else {
		xfs_warn(mp,
"cannot support atomic writes of size %lluk with no reflink support",
			new_max_bytes >> 10);
		return -EINVAL;
	}

set_limit:
	error = xfs_calc_atomic_write_reservation(mp, new_max_fsbs);
	if (error) {
		xfs_warn(mp,
"cannot support completing atomic writes of %lluk",
			new_max_bytes >> 10);
		return error;
	}

	xfs_calc_atomic_write_unit_max(mp, XG_TYPE_AG);
	xfs_calc_atomic_write_unit_max(mp, XG_TYPE_RTG);
	mp->m_awu_max_bytes = new_max_bytes;
	return 0;
}
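/*
 * Note that new_max_bytes == 0 (the initial mount default) bypasses all of
 * the checks above and simply recomputes the reservation and the per-group
 * awu_max values from the current geometry.
 */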
/* Compute maximum possible height for realtime btree types for this fs. */
static inline void
xfs_rtbtree_compute_maxlevels(
	struct xfs_mount	*mp)
{
	mp->m_rtbtree_maxlevels = max(mp->m_rtrmap_maxlevels,
				      mp->m_rtrefc_maxlevels);
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &(mp->m_sb);
	struct xfs_inode	*rip;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint			quotamount = 0;
	uint			quotaflags = 0;
	int			error = 0;
	int			i;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for mismatched sb_features2 values. Older kernels read & wrote
	 * into the wrong sb offset for sb_features2 on some platforms due to
	 * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
	 * which made older superblock reading/writing routines swap it as a
	 * 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the existing
	 * features2 field in case it has already been modified; we don't want
	 * to lose any features. We then update the bad location with the ORed
	 * value so that older kernels will see any features2 flags. The
	 * superblock writeback code ensures the new sb_features2 is copied to
	 * sb_bad_features2 before it is logged or written to disk.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		mp->m_update_sb = true;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_features |= XFS_FEAT_NLINK;
		mp->m_update_sb = true;
	}

	/*
	 * If we were given new sunit/swidth options, do some basic validation
	 * checks and convert the incore dalign and swidth values to the
	 * same units (FSB) that everything else uses. This /must/ happen
	 * before computing the inode geometry.
	 */
	error = xfs_validate_new_dalign(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_mount_setup_inode_geom(mp);
	xfs_rmapbt_compute_maxlevels(mp);
	xfs_rtrmapbt_compute_maxlevels(mp);
	xfs_refcountbt_compute_maxlevels(mp);
	xfs_rtrefcountbt_compute_maxlevels(mp);

	xfs_agbtree_compute_maxlevels(mp);
	xfs_rtbtree_compute_maxlevels(mp);

	/*
	 * Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
	 * is NOT aligned turn off m_dalign since allocator alignment is within
	 * an ag, therefore ag has to be aligned at stripe boundary. Note that
	 * we must compute the free space and rmap btree geometry before doing
	 * this.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = true;

	error = xfs_mount_sysfs_init(mp);
	if (error)
		goto out_remove_scrub_stats;

	xchk_stats_register(mp->m_scrub_stats, mp->m_debugfs);

	error = xfs_errortag_init(mp);
	if (error)
		goto out_remove_sysfs;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_errortag;

	/*
	 * Update the preferred write size based on the information from the
	 * on-disk superblock.
	 */
	mp->m_allocsize_log =
		max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
	mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * If enabled, sparse inode chunk alignment is expected to match the
	 * cluster size. Full inode chunk alignment must match the chunk size,
	 * but that is checked on sb read verification...
	 */
	if (xfs_has_sparseinodes(mp) &&
	    mp->m_sb.sb_spino_align !=
			XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
		xfs_warn(mp,
	"Sparse inode block alignment (%u) must match cluster size (%llu).",
			 mp->m_sb.sb_spino_align,
			 XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
		error = -EINVAL;
		goto out_remove_uuid;
	}

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	mp->m_fixedfsid[0] =
		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);

	error = xfs_da_mount(mp);
	if (error) {
		xfs_warn(mp, "Failed dir/attr init: %d", error);
		goto out_remove_uuid;
	}

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	error = xfs_initialize_perag(mp, 0, sbp->sb_agcount,
			mp->m_sb.sb_dblocks, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_free_dir;
	}

	error = xfs_initialize_rtgroups(mp, 0, sbp->sb_rgcount,
			mp->m_sb.sb_rextents);
	if (error) {
		xfs_warn(mp, "Failed rtgroup init: %d", error);
		goto out_free_perag;
	}

	if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
		xfs_warn(mp, "no log defined");
		error = -EFSCORRUPTED;
		goto out_free_rtgroup;
	}

	error = xfs_inodegc_register_shrinker(mp);
	if (error)
		goto out_fail_wait;

	/*
	 * If we're resuming quota status, pick up the preliminary qflags from
	 * the ondisk superblock so that we know if we should recover dquots.
	 */
	if (xfs_is_resuming_quotaon(mp))
		xfs_qm_resume_quotaon(mp);
	/*
	 * Log's mount-time initialization. The first part of recovery can
	 * place some items on the AIL, to be handled when recovery is
	 * finished or cancelled.
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_inodegc_shrinker;
	}

	/*
	 * If we're resuming quota status and recovered the log, re-sample the
	 * qflags from the ondisk superblock now that we've recovered it, just
	 * in case someone shut down enforcement just before a crash.
	 */
	if (xfs_clear_resuming_quotaon(mp) && xlog_recovery_needed(mp->m_log))
		xfs_qm_resume_quotaon(mp);

	/*
	 * If logged xattrs are still enabled after log recovery finishes, then
	 * they'll be available until unmount. Otherwise, turn them off.
	 */
	if (xfs_sb_version_haslogxattrs(&mp->m_sb))
		xfs_set_using_logged_xattrs(mp);
	else
		xfs_clear_using_logged_xattrs(mp);

	/* Enable background inode inactivation workers. */
	xfs_inodegc_start(mp);
	xfs_blockgc_start(mp);

	if (xfs_has_metadir(mp)) {
		error = xfs_mount_setup_metadir(mp);
		if (error)
			goto out_free_metadir;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp,
			"Failed to read root inode 0x%llx, error %d",
			sbp->sb_rootino, -error);
		goto out_free_metadir;
	}

	ASSERT(rip != NULL);

	if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		error = -EFSCORRUPTED;
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/* Make sure the summary counts are ok. */
	error = xfs_check_summary_counts(mp);
	if (error)
		goto out_rtunmount;

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode. Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_sb && !xfs_is_readonly(mp)) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				goto out_rtunmount;
		}
	}
	/*
	 * Finish recovering the file system. This part needed to be delayed
	 * until after the root and real-time bitmap inodes were consistently
	 * read in. Temporarily create per-AG space reservations for metadata
	 * btree shape changes because space freeing transactions (for inode
	 * inactivation) require the per-AG reservation in lieu of reserving
	 * blocks.
	 */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error == -ENOSPC)
		xfs_warn(mp,
	"ENOSPC reserving per-AG metadata pool, log recovery may fail.");
	error = xfs_log_mount_finish(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Now the log is fully replayed, we can transition to full read-only
	 * mode for read-only mounts. This will sync all the metadata and clean
	 * the log so that the recovery we just performed does not have to be
	 * replayed again on the next mount.
	 *
	 * We use the same quiesce mechanism as the rw->ro remount, as they are
	 * semantically identical operations.
	 */
	if (xfs_is_readonly(mp) && !xfs_has_norecovery(mp))
		xfs_log_clean(mp);

	if (xfs_has_zoned(mp)) {
		error = xfs_mount_zones(mp);
		if (error)
			goto out_rtunmount;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, garbage collection
	 * etc. Data allocations are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!xfs_is_readonly(mp)) {
		for (i = 0; i < XC_FREE_NR; i++) {
			error = xfs_reserve_blocks(mp, i,
					xfs_default_resblks(mp, i));
			if (error)
				xfs_warn(mp,
"Unable to allocate reserve blocks. Continuing without reserve pool for %s.",
					xfs_free_pool_name[i]);
		}

		/* Reserve AG blocks for future btree expansion. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			goto out_agresv;

		xfs_zone_gc_start(mp);
	}

	/*
	 * Pre-calculate atomic write unit max. This involves computations
	 * derived from transaction reservations, so we must do this after the
	 * log is fully initialized.
	 */
	error = xfs_set_max_atomic_write_opt(mp, mp->m_awu_max_bytes);
	if (error)
		goto out_agresv;

	return 0;

out_agresv:
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	if (xfs_has_zoned(mp))
		xfs_unmount_zones(mp);
out_rtunmount:
	xfs_rtunmount_inodes(mp);
out_rele_rip:
	xfs_irele(rip);
	/* Clean out dquots that might be in memory after quotacheck. */
	xfs_qm_unmount(mp);
out_free_metadir:
	if (mp->m_metadirip)
		xfs_irele(mp->m_metadirip);

	/*
	 * Inactivate all inodes that might still be in memory after a log
	 * intent recovery failure so that reclaim can free them. Metadata
	 * inodes and the root directory shouldn't need inactivation, but the
	 * mount failed for some reason, so pull down all the state and flee.
	 */
	xfs_inodegc_flush(mp);

	/*
	 * Flush all inode reclamation work and flush the log.
	 * We have to do this /after/ rtunmount and qm_unmount because those
	 * two will have scheduled delayed reclaim for the rt/quota inodes.
	 *
	 * This is slightly different from the unmountfs call sequence
	 * because we could be tearing down a partially set up mount. In
	 * particular, if log_mount_finish fails we bail out without calling
	 * qm_unmount_quotas and therefore rely on qm_unmount to release the
	 * quota inodes.
	 */
	xfs_unmount_flush_inodes(mp);
	xfs_log_mount_cancel(mp);
out_inodegc_shrinker:
	shrinker_free(mp->m_inodegc_shrinker);
out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_buftarg_drain(mp->m_logdev_targp);
	xfs_buftarg_drain(mp->m_ddev_targp);
out_free_rtgroup:
	xfs_free_rtgroups(mp, 0, mp->m_sb.sb_rgcount);
out_free_perag:
	xfs_free_perag_range(mp, 0, mp->m_sb.sb_agcount);
out_free_dir:
	xfs_da_unmount(mp);
out_remove_uuid:
	xfs_uuid_unmount(mp);
out_remove_errortag:
	xfs_errortag_del(mp);
out_remove_sysfs:
	xfs_mount_sysfs_del(mp);
out_remove_scrub_stats:
	xchk_stats_unregister(mp->m_scrub_stats);
out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	int			error;

	/*
	 * Perform all on-disk metadata updates required to inactivate inodes
	 * that the VFS evicted earlier in the unmount process. Freeing inodes
	 * and discarding CoW fork preallocations can cause shape changes to
	 * the free inode and refcount btrees, respectively, so we must finish
	 * this before we discard the metadata space reservations. Metadata
	 * inodes and the root directory do not require inactivation.
	 */
	xfs_inodegc_flush(mp);

	xfs_blockgc_stop(mp);
	if (!test_bit(XFS_OPSTATE_READONLY, &mp->m_opstate))
		xfs_zone_gc_stop(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	if (xfs_has_zoned(mp))
		xfs_unmount_zones(mp);
	xfs_rtunmount_inodes(mp);
	xfs_irele(mp->m_rootip);
	if (mp->m_metadirip)
		xfs_irele(mp->m_metadirip);

	xfs_unmount_flush_inodes(mp);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't
	 * account the reserved free space as used. This is really only
	 * necessary for lazy superblock counting because it trusts the
	 * incore superblock counters to be absolutely correct on clean
	 * unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct
	 * the correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all
	 * because we only ever apply deltas to the superblock and hence the
	 * incore value does not matter....
	 */
	error = xfs_reserve_blocks(mp, XC_FREE_BLOCKS, 0);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");
	xfs_unmount_check(mp);

	/*
	 * Indicate that it's ok to clear log incompat bits before cleaning
	 * the log and writing the unmount record.
	 */
	xfs_set_done_with_log_incompat(mp);
	xfs_log_unmount(mp);
	xfs_da_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp);
#endif
	shrinker_free(mp->m_inodegc_shrinker);
	xfs_free_rtgroups(mp, 0, mp->m_sb.sb_rgcount);
	xfs_free_perag_range(mp, 0, mp->m_sb.sb_agcount);
	xfs_errortag_del(mp);
	xchk_stats_unregister(mp->m_scrub_stats);
	xfs_mount_sysfs_del(mp);
}

/*
 * Determine whether modifications can proceed. The caller specifies the
 * minimum freeze level for which modifications should not be allowed. This
 * allows certain operations to proceed while the freeze sequence is in
 * progress, if necessary.
 */
bool
xfs_fs_writable(
	struct xfs_mount	*mp,
	int			level)
{
	ASSERT(level > SB_UNFROZEN);
	if ((mp->m_super->s_writers.frozen >= level) ||
	    xfs_is_shutdown(mp) || xfs_is_readonly(mp))
		return false;

	return true;
}

/*
 * Estimate the amount of free space that is not available to userspace and is
 * not explicitly reserved from the incore fdblocks. This includes:
 *
 * - The minimum number of blocks needed to support splitting a bmap btree
 * - The blocks currently in use by the freespace btrees because they record
 *   the actual blocks that will fill per-AG metadata space reservations
 */
uint64_t
xfs_freecounter_unavailable(
	struct xfs_mount	*mp,
	enum xfs_free_counter	ctr)
{
	if (ctr != XC_FREE_BLOCKS)
		return 0;
	return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
}

void
xfs_add_freecounter(
	struct xfs_mount	*mp,
	enum xfs_free_counter	ctr,
	uint64_t		delta)
{
	struct xfs_freecounter	*counter = &mp->m_free[ctr];
	uint64_t		res_used;

	/*
	 * If the reserve pool is depleted, put blocks back into it first.
	 * Most of the time the pool is full.
	 */
	if (likely(counter->res_avail == counter->res_total)) {
		percpu_counter_add(&counter->count, delta);
		return;
	}

	spin_lock(&mp->m_sb_lock);
	res_used = counter->res_total - counter->res_avail;
	if (res_used > delta) {
		counter->res_avail += delta;
	} else {
		delta -= res_used;
		counter->res_avail = counter->res_total;
		percpu_counter_add(&counter->count, delta);
	}
	spin_unlock(&mp->m_sb_lock);
}
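/*
 * Note the symmetry with xfs_add_freecounter() above: the decrement side
 * below dips into the reserve pool (when @rsvd is set) only after the percpu
 * counter has been pushed below the unavailable threshold, and the increment
 * side refills that pool before returning anything to the general free space
 * counter.
 */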
/* Adjust in-core free blocks or RT extents. */
int
xfs_dec_freecounter(
	struct xfs_mount	*mp,
	enum xfs_free_counter	ctr,
	uint64_t		delta,
	bool			rsvd)
{
	struct xfs_freecounter	*counter = &mp->m_free[ctr];
	s32			batch;

	ASSERT(ctr < XC_FREE_NR);

	/*
	 * Taking blocks away, need to be more accurate the closer we
	 * are to zero.
	 *
	 * If the counter has a value of less than 2 * max batch size,
	 * then make everything serialise as we are real close to
	 * ENOSPC.
	 */
	if (__percpu_counter_compare(&counter->count, 2 * XFS_FDBLOCKS_BATCH,
				     XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;
	else
		batch = XFS_FDBLOCKS_BATCH;

	/*
	 * Set aside allocbt blocks because these blocks are tracked as free
	 * space but not available for allocation. Technically this means that
	 * a single reservation cannot consume all remaining free space, but
	 * the ratio of allocbt blocks to usable free blocks should be rather
	 * small. The tradeoff without this is that filesystems that maintain
	 * high perag block reservations can over reserve physical block
	 * availability and fail physical allocation, which leads to much more
	 * serious problems (i.e. transaction abort, pagecache discards, etc.)
	 * than slightly premature -ENOSPC.
	 */
	percpu_counter_add_batch(&counter->count, -((int64_t)delta), batch);
	if (__percpu_counter_compare(&counter->count,
			xfs_freecounter_unavailable(mp, ctr),
			XFS_FDBLOCKS_BATCH) < 0) {
		/*
		 * Lock up the sb for dipping into reserves before releasing
		 * the space that took us to ENOSPC.
		 */
		spin_lock(&mp->m_sb_lock);
		percpu_counter_add(&counter->count, delta);
		if (!rsvd)
			goto fdblocks_enospc;
		if (delta > counter->res_avail) {
			if (ctr == XC_FREE_BLOCKS)
				xfs_warn_once(mp,
"Reserve blocks depleted! Consider increasing reserve pool size.");
			goto fdblocks_enospc;
		}
		counter->res_avail -= delta;
		trace_xfs_freecounter_reserved(mp, ctr, delta, _RET_IP_);
		spin_unlock(&mp->m_sb_lock);
	}

	/* we had space! */
	return 0;

fdblocks_enospc:
	trace_xfs_freecounter_enospc(mp, ctr, delta, _RET_IP_);
	spin_unlock(&mp->m_sb_lock);
	return -ENOSPC;
}

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	struct xfs_mount	*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		xfs_notice(mp, "%s required on read-only device.", message);
		xfs_notice(mp, "write access unavailable, cannot proceed.");
		return -EROFS;
	}
	return 0;
}

/* Force the summary counters to be recalculated at next mount. */
void
xfs_force_summary_recalc(
	struct xfs_mount	*mp)
{
	if (!xfs_has_lazysbcount(mp))
		return;

	xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
}

/*
 * Enable a log incompat feature flag in the primary superblock. The caller
 * cannot have any other transactions in progress.
 */
int
xfs_add_incompat_log_feature(
	struct xfs_mount	*mp,
	uint32_t		feature)
{
	struct xfs_dsb		*dsb;
	int			error;

	ASSERT(hweight32(feature) == 1);
	ASSERT(!(feature & XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));

	/*
	 * Force the log to disk and kick the background AIL thread to reduce
	 * the chances that the bwrite will stall waiting for the AIL to unpin
	 * the primary superblock buffer. This isn't a data integrity
	 * operation, so we don't need a synchronous push.
	 */
	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all(mp->m_ail);

	/*
	 * Lock the primary superblock buffer to serialize all callers that
	 * are trying to set feature bits.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_hold(mp->m_sb_bp);

	if (xfs_is_shutdown(mp)) {
		error = -EIO;
		goto rele;
	}

	if (xfs_sb_has_incompat_log_feature(&mp->m_sb, feature))
		goto rele;

	/*
	 * Write the primary superblock to disk immediately, because we need
	 * the log_incompat bit to be set in the primary super now to protect
	 * the log items that we're going to commit later.
	 */
	dsb = mp->m_sb_bp->b_addr;
	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_features_log_incompat |= cpu_to_be32(feature);
	error = xfs_bwrite(mp->m_sb_bp);
	if (error)
		goto shutdown;

	/*
	 * Add the feature bits to the incore superblock before we unlock the
	 * buffer.
	 */
	xfs_sb_add_incompat_log_features(&mp->m_sb, feature);
	xfs_buf_relse(mp->m_sb_bp);

	/* Log the superblock to disk. */
	return xfs_sync_sb(mp, false);
shutdown:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
rele:
	xfs_buf_relse(mp->m_sb_bp);
	return error;
}

/*
 * Clear all the log incompat flags from the superblock.
 *
 * The caller cannot be in a transaction, must ensure that the log does not
 * contain any log items protected by any log incompat bit, and must ensure
 * that there are no other threads that depend on the state of the log incompat
 * feature flags in the primary super.
 *
 * Returns true if the superblock is dirty.
 */
bool
xfs_clear_incompat_log_features(
	struct xfs_mount	*mp)
{
	bool			ret = false;

	if (!xfs_has_crc(mp) ||
	    !xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL) ||
	    xfs_is_shutdown(mp) ||
	    !xfs_is_done_with_log_incompat(mp))
		return false;

	/*
	 * Update the incore superblock. We synchronize on the primary super
	 * buffer lock to be consistent with the add function, though at least
	 * in theory this shouldn't be necessary.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_hold(mp->m_sb_bp);

	if (xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
		xfs_sb_remove_incompat_log_features(&mp->m_sb);
		ret = true;
	}

	xfs_buf_relse(mp->m_sb_bp);
	return ret;
}

/*
 * Update the in-core delayed block counter.
 *
 * We prefer to update the counter without having to take a spinlock for every
 * counter update (i.e. batching). Each change to the delayed allocation
 * reservations can easily exceed the default percpu counter batch size, so we
 * use a larger batch factor here.
 *
 * Note that we don't currently have any callers requiring fast summation
 * (e.g. percpu_counter_read) so we can use a big batch value here.
 */
#define XFS_DELALLOC_BATCH	(4096)
void
xfs_mod_delalloc(
	struct xfs_inode	*ip,
	int64_t			data_delta,
	int64_t			ind_delta)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip)) {
		percpu_counter_add_batch(&mp->m_delalloc_rtextents,
				xfs_blen_to_rtbxlen(mp, data_delta),
				XFS_DELALLOC_BATCH);
		if (!ind_delta)
			return;
		data_delta = 0;
	}
	percpu_counter_add_batch(&mp->m_delalloc_blks, data_delta + ind_delta,
			XFS_DELALLOC_BATCH);
}