// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"


static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kmem_free(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish UUID in struct super_block */
	uuid_copy(&mp->m_super->s_uuid, uuid);

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}
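/*
 * Note on the table above: it only ever grows (kmem_realloc() extends it by
 * one slot at a time), while xfs_uuid_unmount() merely zeroes an entry so a
 * later xfs_uuid_mount() can reuse it as a "hole".  The whole array is only
 * released by xfs_uuid_table_free() once no XFS filesystems remain mounted.
 */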
STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}


STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		xfs_iunlink_destroy(pag);
		xfs_buf_hash_destroy(pag);
		mutex_destroy(&pag->pag_ici_reclaim_lock);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}
/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return -EFBIG;
	return 0;
}
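/*
 * For illustration of the limit above (example values, not taken from this
 * file): with 4KiB pages and 4KiB filesystem blocks, PAGE_SHIFT and
 * sb_blocklog are both 12, so nblocks is compared against ULONG_MAX
 * directly.  On a 32-bit kernel ULONG_MAX is 2^32 - 1, capping the device
 * at roughly 2^32 * 4KiB = 16TiB; on a 64-bit kernel the limit is not a
 * practical constraint.
 */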
int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = NULLAGNUMBER;
	xfs_perag_t	*pag;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind_new_pags;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		if (xfs_buf_hash_init(pag))
			goto out_free_pag;
		init_waitqueue_head(&pag->pagb_wait);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_hash_destroy;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_hash_destroy;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;
		error = xfs_iunlink_init(pag);
		if (error)
			goto out_hash_destroy;
		spin_lock_init(&pag->pag_state_lock);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_hash_destroy:
	xfs_buf_hash_destroy(pag);
out_free_pag:
	mutex_destroy(&pag->pag_ici_reclaim_lock);
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_iunlink_destroy(pag);
		mutex_destroy(&pag->pag_ici_reclaim_lock);
		kmem_free(pag);
	}
	return error;
}
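/*
 * Illustrative sketch of the growfs case mentioned above: growing the data
 * device ends up calling this function again with the enlarged AG count,
 * and the xfs_perag_get() check makes it initialise only the newly added
 * AGs, e.g. something like:
 *
 *	error = xfs_initialize_perag(mp, nagcount, &nagimax);
 */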
/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device.  It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock. This will be kept
	 * around at all times to optimize access to the superblock. Therefore,
	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
	 * elevated.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}
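/*
 * Note on the read-twice flow above: the first pass runs with
 * buf_ops == NULL and a sector size guessed from the block device, just to
 * pull sb_sectsize out of the on-disk superblock; the second pass (via the
 * reread label) uses the real sector size and attaches the verifying buffer
 * ops, so only the second read is actually validated.
 */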
/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			xfs_warn(mp,
		"alignment check failed: sunit/swidth vs. blocksize(%d)",
				sbp->sb_blocksize);
			return -EINVAL;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				xfs_warn(mp,
			"alignment check failed: sunit/swidth vs. agsize(%d)",
					sbp->sb_agblocks);
				return -EINVAL;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				xfs_warn(mp,
		"alignment check failed: sunit(%d) less than bsize(%d)",
					mp->m_dalign, sbp->sb_blocksize);
				return -EINVAL;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_sb = true;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_sb = true;
			}
		} else {
			xfs_warn(mp,
	"cannot change alignment: superblock does not support data alignment");
			return -EINVAL;
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}
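/*
 * Worked example for the conversion above (option values assumed): the
 * sunit/swidth mount options arrive in 512-byte basic blocks, so with
 * sunit=128 (64KiB) on a 4KiB-block filesystem, BBTOB(128) = 65536 is a
 * multiple of the block size and XFS_BB_TO_FSBT() turns 128 basic blocks
 * into 16 filesystem blocks; sb_agblocks must then divide evenly by 16.
 */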
/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}
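/*
 * For illustration (all numbers assumed): on a 10GiB filesystem with 4KiB
 * blocks (sb_dblocks = 2621440), sb_imax_pct = 25, 512-byte inodes and
 * 8-block inode chunks (m_ialloc_blks = 8, sb_inopblog = 3), this computes
 * 2621440 * 25 / 100 = 655360 blocks, rounds down to a whole number of
 * 8-block chunks (no change here), and sets
 * m_maxicount = 655360 << 3 = 5242880 inodes.
 */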
/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}
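/*
 * For illustration, assuming the traditional constants (XFS_READIO_LOG_LARGE
 * and XFS_WRITEIO_LOG_LARGE of 16, i.e. 64KiB): on a 4KiB-block filesystem
 * mounted without the wsync or default-iosize options, m_readio_log and
 * m_writeio_log both become 16, so
 * m_readio_blocks = m_writeio_blocks = 1 << (16 - 12) = 16 blocks.
 */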
/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}
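/*
 * Assuming the usual five XFS_LOWSP_MAX levels, the loop above simply
 * precomputes 1%, 2%, 3%, 4% and 5% of sb_dblocks, so the speculative
 * preallocation throttling code can compare free space against
 * m_low_space[] without doing a division on every allocation.
 */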
/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(uint64_t, resblks, 8192);
	return resblks;
}
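/*
 * Worked example for the default above: 5% of the data device, capped at
 * 8192 blocks.  A 1TiB filesystem with 4KiB blocks has 268435456 blocks;
 * 268435456 / 20 is far above the cap, so resblks = 8192 (32MiB).  At 4
 * blocks per allocation transaction that covers 8192 / 4 = 2048, i.e. the
 * "roughly 2000" concurrent reservations mentioned in the comment.
 */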
/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount	*mp)
{
	/*
	 * The AG0 superblock verifier rejects in-progress filesystems,
	 * so we should never see the flag set this far into mounting.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, the first phase of recovery has completed and we
	 * have consistent AG blocks on disk. We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * If the log was clean when we mounted, we can check the summary
	 * counters.  If any of them are obviously incorrect, we can recompute
	 * them from the AGF headers in the next step.
	 */
	if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;

	/*
	 * We can safely re-initialise incore superblock counters from the
	 * per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we waited for recovery to finish before doing
	 * this.
	 *
	 * If the filesystem was cleanly unmounted or the previous check did
	 * not flag anything weird, then we can trust the values in the
	 * superblock to be correct and we don't need to do anything here.
	 * Otherwise, recalculate the summary counters.
	 */
	if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
	    !(mp->m_flags & XFS_MOUNT_BAD_SUMMARY))
		return 0;

	return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}
We then update the bad location with the ORed 704074e427bSDave Chinner * value so that older kernels will see any features2 flags. The 705074e427bSDave Chinner * superblock writeback code ensures the new sb_features2 is copied to 706074e427bSDave Chinner * sb_bad_features2 before it is logged or written to disk. 707ee1c0908SDavid Chinner */ 708e6957ea4SEric Sandeen if (xfs_sb_has_mismatched_features2(sbp)) { 7090b932cccSDave Chinner xfs_warn(mp, "correcting sb_features alignment problem"); 710ee1c0908SDavid Chinner sbp->sb_features2 |= sbp->sb_bad_features2; 71161e63ecbSDave Chinner mp->m_update_sb = true; 712e6957ea4SEric Sandeen 713e6957ea4SEric Sandeen /* 714e6957ea4SEric Sandeen * Re-check for ATTR2 in case it was found in bad_features2 715e6957ea4SEric Sandeen * slot. 716e6957ea4SEric Sandeen */ 7177c12f296STim Shimmin if (xfs_sb_version_hasattr2(&mp->m_sb) && 7187c12f296STim Shimmin !(mp->m_flags & XFS_MOUNT_NOATTR2)) 719e6957ea4SEric Sandeen mp->m_flags |= XFS_MOUNT_ATTR2; 7207c12f296STim Shimmin } 721e6957ea4SEric Sandeen 7227c12f296STim Shimmin if (xfs_sb_version_hasattr2(&mp->m_sb) && 7237c12f296STim Shimmin (mp->m_flags & XFS_MOUNT_NOATTR2)) { 7247c12f296STim Shimmin xfs_sb_version_removeattr2(&mp->m_sb); 72561e63ecbSDave Chinner mp->m_update_sb = true; 7267c12f296STim Shimmin 7277c12f296STim Shimmin /* update sb_versionnum for the clearing of the morebits */ 7287c12f296STim Shimmin if (!sbp->sb_features2) 72961e63ecbSDave Chinner mp->m_update_sb = true; 730ee1c0908SDavid Chinner } 731ee1c0908SDavid Chinner 732263997a6SDave Chinner /* always use v2 inodes by default now */ 733263997a6SDave Chinner if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) { 734263997a6SDave Chinner mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT; 73561e63ecbSDave Chinner mp->m_update_sb = true; 736263997a6SDave Chinner } 737263997a6SDave Chinner 738ee1c0908SDavid Chinner /* 7390771fb45SEric Sandeen * Check if sb_agblocks is aligned at stripe boundary 7400771fb45SEric Sandeen * If sb_agblocks is NOT aligned turn off m_dalign since 7410771fb45SEric Sandeen * allocator alignment is within an ag, therefore ag has 7420771fb45SEric Sandeen * to be aligned at stripe boundary. 7430771fb45SEric Sandeen */ 7447884bc86SChristoph Hellwig error = xfs_update_alignment(mp); 7450771fb45SEric Sandeen if (error) 746f9057e3dSChristoph Hellwig goto out; 7470771fb45SEric Sandeen 7480771fb45SEric Sandeen xfs_alloc_compute_maxlevels(mp); 7490771fb45SEric Sandeen xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); 7500771fb45SEric Sandeen xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); 7510771fb45SEric Sandeen xfs_ialloc_compute_maxlevels(mp); 752035e00acSDarrick J. Wong xfs_rmapbt_compute_maxlevels(mp); 7531946b91cSDarrick J. 
	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = true;

	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
	if (error)
		goto out;

	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_sysfs;

	error = xfs_error_sysfs_init(mp);
	if (error)
		goto out_del_stats;

	error = xfs_errortag_init(mp);
	if (error)
		goto out_remove_error_sysfs;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_errortag;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 *
	 * For v5 filesystems, scale the cluster size with the inode size to
	 * keep a constant ratio of inode per cluster buffer, but only if mkfs
	 * has set the inode alignment value appropriately for larger cluster
	 * sizes.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		int	new_size = mp->m_inode_cluster_size;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			mp->m_inode_cluster_size = new_size;
	}
	mp->m_blocks_per_cluster = xfs_icluster_size_fsb(mp);
	mp->m_inodes_per_cluster = XFS_FSB_TO_INO(mp, mp->m_blocks_per_cluster);
	mp->m_cluster_align = xfs_ialloc_cluster_alignment(mp);
	mp->m_cluster_align_inodes = XFS_FSB_TO_INO(mp, mp->m_cluster_align);
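	/*
	 * For illustration, assuming the traditional constants (8192-byte
	 * XFS_INODE_BIG_CLUSTER_SIZE, 256-byte XFS_DINODE_MIN_SIZE): a v5
	 * filesystem with 512-byte inodes scales the cluster buffer to
	 * 8192 * (512 / 256) = 16KiB, and keeps it only if sb_inoalignmt
	 * covers that size; with 4KiB blocks that is 4 blocks per cluster.
	 */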
	/*
	 * If enabled, sparse inode chunk alignment is expected to match the
	 * cluster size. Full inode chunk alignment must match the chunk size,
	 * but that is checked on sb read verification...
	 */
	if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
	    mp->m_sb.sb_spino_align !=
			XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) {
		xfs_warn(mp,
	"Sparse inode block alignment (%u) must match cluster size (%llu).",
			 mp->m_sb.sb_spino_align,
			 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size));
		error = -EINVAL;
		goto out_remove_uuid;
	}

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	mp->m_fixedfsid[0] =
		(get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
		 get_unaligned_be16(&sbp->sb_uuid.b[4]);
	mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);

	error = xfs_da_mount(mp);
	if (error) {
		xfs_warn(mp, "Failed dir/attr init: %d", error);
		goto out_remove_uuid;
	}

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_free_dir;
	}

	if (!sbp->sb_logblocks) {
		xfs_warn(mp, "no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_free_perag;
	}

	/*
	 * Log's mount-time initialization. The first part of recovery can
	 * place some items on the AIL, to be handled when recovery is
	 * finished or cancelled.
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}
The first part of recovery can place 887f0b2efadSBrian Foster * some items on the AIL, to be handled when recovery is finished or 888f0b2efadSBrian Foster * cancelled. 8891da177e4SLinus Torvalds */ 8901da177e4SLinus Torvalds error = xfs_log_mount(mp, mp->m_logdev_targp, 8911da177e4SLinus Torvalds XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), 8921da177e4SLinus Torvalds XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); 8931da177e4SLinus Torvalds if (error) { 8940b932cccSDave Chinner xfs_warn(mp, "log mount failed"); 895d4f3512bSDave Chinner goto out_fail_wait; 8961da177e4SLinus Torvalds } 8971da177e4SLinus Torvalds 8982e9e6481SDarrick J. Wong /* Make sure the summary counts are ok. */ 8992e9e6481SDarrick J. Wong error = xfs_check_summary_counts(mp); 900f9057e3dSChristoph Hellwig if (error) 9016eee8972Skbuild test robot goto out_log_dealloc; 902f9057e3dSChristoph Hellwig 90392821e2bSDavid Chinner /* 9041da177e4SLinus Torvalds * Get and sanity-check the root inode. 9051da177e4SLinus Torvalds * Save the pointer to it in the mount structure. 9061da177e4SLinus Torvalds */ 907541b5accSDave Chinner error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED, 908541b5accSDave Chinner XFS_ILOCK_EXCL, &rip); 9091da177e4SLinus Torvalds if (error) { 910541b5accSDave Chinner xfs_warn(mp, 911541b5accSDave Chinner "Failed to read root inode 0x%llx, error %d", 912541b5accSDave Chinner sbp->sb_rootino, -error); 913f9057e3dSChristoph Hellwig goto out_log_dealloc; 9141da177e4SLinus Torvalds } 9151da177e4SLinus Torvalds 9161da177e4SLinus Torvalds ASSERT(rip != NULL); 9171da177e4SLinus Torvalds 918c19b3b05SDave Chinner if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) { 9190b932cccSDave Chinner xfs_warn(mp, "corrupted root inode %llu: not a directory", 920b6574520SNathan Scott (unsigned long long)rip->i_ino); 9211da177e4SLinus Torvalds xfs_iunlock(rip, XFS_ILOCK_EXCL); 9221da177e4SLinus Torvalds XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, 9231da177e4SLinus Torvalds mp); 9242451337dSDave Chinner error = -EFSCORRUPTED; 925f9057e3dSChristoph Hellwig goto out_rele_rip; 9261da177e4SLinus Torvalds } 9271da177e4SLinus Torvalds mp->m_rootip = rip; /* save it */ 9281da177e4SLinus Torvalds 9291da177e4SLinus Torvalds xfs_iunlock(rip, XFS_ILOCK_EXCL); 9301da177e4SLinus Torvalds 9311da177e4SLinus Torvalds /* 9321da177e4SLinus Torvalds * Initialize realtime inode pointers in the mount structure 9331da177e4SLinus Torvalds */ 9340771fb45SEric Sandeen error = xfs_rtmount_inodes(mp); 9350771fb45SEric Sandeen if (error) { 9361da177e4SLinus Torvalds /* 9371da177e4SLinus Torvalds * Free up the root inode. 9381da177e4SLinus Torvalds */ 9390b932cccSDave Chinner xfs_warn(mp, "failed to read RT inodes"); 940f9057e3dSChristoph Hellwig goto out_rele_rip; 9411da177e4SLinus Torvalds } 9421da177e4SLinus Torvalds 9431da177e4SLinus Torvalds /* 9447884bc86SChristoph Hellwig * If this is a read-only mount defer the superblock updates until 9457884bc86SChristoph Hellwig * the next remount into writeable mode. Otherwise we would never 9467884bc86SChristoph Hellwig * perform the update e.g. for the root filesystem. 
	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				goto out_rtunmount;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be delayed
	 * until after the root and real-time bitmap inodes were consistently
	 * read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Now the log is fully replayed, we can transition to full read-only
	 * mode for read-only mounts. This will sync all the metadata and clean
	 * the log so that the recovery we just performed does not have to be
	 * replayed again on the next mount.
	 *
	 * We use the same quiesce mechanism as the rw->ro remount, as they are
	 * semantically identical operations.
	 */
	if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
							XFS_MOUNT_RDONLY) {
		xfs_quiesce_attr(mp);
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}
	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			goto out_quota;
		}

		/* Reserve AG blocks for future btree expansion. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			goto out_agresv;
	}

	return 0;
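	/*
	 * The labels below tear the mount down in the reverse order of the
	 * setup above; each later failure point falls through progressively
	 * more of them.
	 */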
 out_agresv:
	xfs_fs_unreserve_ag_blocks(mp);
 out_quota:
	xfs_qm_unmount_quotas(mp);
 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	xfs_irele(rip);
	/* Clean out dquots that might be in memory after quotacheck. */
	xfs_qm_unmount(mp);
	/*
	 * Cancel all delayed reclaim work and reclaim the inodes directly.
	 * We have to do this /after/ rtunmount and qm_unmount because those
	 * two will have scheduled delayed reclaim for the rt/quota inodes.
	 *
	 * This is slightly different from the unmountfs call sequence
	 * because we could be tearing down a partially set up mount.  In
	 * particular, if log_mount_finish fails we bail out without calling
	 * qm_unmount_quotas and therefore rely on qm_unmount to release the
	 * quota inodes.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);
 out_log_dealloc:
	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
	xfs_log_mount_cancel(mp);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_free_dir:
	xfs_da_unmount(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out_remove_errortag:
	xfs_errortag_del(mp);
 out_remove_error_sysfs:
	xfs_error_sysfs_del(mp);
 out_del_stats:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
 out_remove_sysfs:
	xfs_sysfs_del(&mp->m_kobj);
 out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	uint64_t		resblks;
	int			error;

	xfs_icache_disable_reclaim(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	xfs_irele(mp->m_rootip);

	/*
	 * We can potentially deadlock here if we have an inode cluster
	 * that has been freed but has its buffer still pinned in memory
	 * because the transaction is still sitting in an iclog. The stale
	 * inodes on that buffer will have their flush locks held until the
	 * transaction hits the disk and the callbacks run. The inode
	 * flush takes the flush lock unconditionally and with nothing to
	 * push out the iclog we will never get that unlocked. Hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);
119891ee575fSBrian Foster /*
119991ee575fSBrian Foster  * Determine whether modifications can proceed. The caller specifies the minimum
120091ee575fSBrian Foster  * freeze level for which modifications should not be allowed. This allows
120191ee575fSBrian Foster  * certain operations to proceed while the freeze sequence is in progress, if
120291ee575fSBrian Foster  * necessary.
120391ee575fSBrian Foster  */
120491ee575fSBrian Foster bool
120591ee575fSBrian Foster xfs_fs_writable(
120691ee575fSBrian Foster 	struct xfs_mount	*mp,
120791ee575fSBrian Foster 	int			level)
120892821e2bSDavid Chinner {
120991ee575fSBrian Foster 	ASSERT(level > SB_UNFROZEN);
121091ee575fSBrian Foster 	if ((mp->m_super->s_writers.frozen >= level) ||
121191ee575fSBrian Foster 	    XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
121291ee575fSBrian Foster 		return false;
121391ee575fSBrian Foster 
121491ee575fSBrian Foster 	return true;
121592821e2bSDavid Chinner }
121692821e2bSDavid Chinner 
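
/*
 * Editor's note: a small standalone model (not kernel code) of the
 * freeze-level gate implemented by xfs_fs_writable() above.  The VFS freeze
 * levels are ordered, so "frozen >= level" blocks exactly those writers at
 * or above the caller's chosen threshold, while lower-level activity can
 * still proceed.  The enum below mirrors the ordering of the kernel's
 * SB_FREEZE_* levels but is a demo definition, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

enum freeze_level {		/* ordered: higher value == deeper freeze */
	DEMO_UNFROZEN = 0,
	DEMO_FREEZE_WRITE,	/* user writes blocked */
	DEMO_FREEZE_PAGEFAULT,	/* page faults blocked too */
	DEMO_FREEZE_FS,		/* internal fs transactions blocked */
	DEMO_FREEZE_COMPLETE,
};

static bool fs_writable(enum freeze_level frozen, enum freeze_level level)
{
	/* modifications are allowed only below the caller's threshold */
	return frozen < level;
}

int main(void)
{
	enum freeze_level frozen = DEMO_FREEZE_PAGEFAULT;

	/* ordinary writes are blocked once past the write-freeze level... */
	printf("user write allowed:   %d\n",
	       fs_writable(frozen, DEMO_FREEZE_WRITE));
	/* ...but a counter sync that tolerates everything short of a
	 * complete freeze may still run (cf. xfs_log_sbcount below). */
	printf("counter sync allowed: %d\n",
	       fs_writable(frozen, DEMO_FREEZE_COMPLETE));
	return 0;
}
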
121792821e2bSDavid Chinner /*
1218b2ce3974SAlex Elder  * xfs_log_sbcount
1219b2ce3974SAlex Elder  *
1220adab0f67SChandra Seetharaman  * Sync the superblock counters to disk.
1221b2ce3974SAlex Elder  *
122291ee575fSBrian Foster  * Note this code can be called during the process of freezing, so we use the
122391ee575fSBrian Foster  * transaction allocator that does not block when the transaction subsystem is
122491ee575fSBrian Foster  * in its frozen state.
122592821e2bSDavid Chinner  */
122692821e2bSDavid Chinner int
1227adab0f67SChandra Seetharaman xfs_log_sbcount(xfs_mount_t *mp)
122892821e2bSDavid Chinner {
122991ee575fSBrian Foster 	/* allow this to proceed during the freeze sequence... */
123091ee575fSBrian Foster 	if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
123192821e2bSDavid Chinner 		return 0;
123292821e2bSDavid Chinner 
123392821e2bSDavid Chinner 	/*
123492821e2bSDavid Chinner 	 * We don't need to do this if we are updating the superblock
123592821e2bSDavid Chinner 	 * counters on every modification.
123692821e2bSDavid Chinner 	 */
123792821e2bSDavid Chinner 	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
123892821e2bSDavid Chinner 		return 0;
123992821e2bSDavid Chinner 
124061e63ecbSDave Chinner 	return xfs_sync_sb(mp, true);
124192821e2bSDavid Chinner }
124292821e2bSDavid Chinner 
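
/*
 * Editor's note: a toy model (not kernel code) of why lazy superblock
 * counters only need syncing at unmount/freeze time, as xfs_log_sbcount()
 * above does.  Per-modification updates touch only the incore value; the
 * on-disk copy is allowed to go stale and is written back once, at a clean
 * shutdown.  An unclean shutdown simply recomputes the value on the next
 * mount.  struct demo_sb and its fields are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_sb {
	int64_t incore_icount;	/* always correct */
	int64_t ondisk_icount;	/* lazily maintained */
};

static void mod_icount(struct demo_sb *sb, int64_t delta)
{
	sb->incore_icount += delta;	/* no on-disk update per change */
}

static void log_sbcount(struct demo_sb *sb)
{
	sb->ondisk_icount = sb->incore_icount;	/* one sync at clean unmount */
}

int main(void)
{
	struct demo_sb sb = { .incore_icount = 100, .ondisk_icount = 100 };

	mod_icount(&sb, 64);
	mod_icount(&sb, -32);
	printf("before sync: incore=%lld ondisk=%lld (stale)\n",
	       (long long)sb.incore_icount, (long long)sb.ondisk_icount);
	log_sbcount(&sb);		/* clean unmount path */
	printf("after sync:  incore=%lld ondisk=%lld\n",
	       (long long)sb.incore_icount, (long long)sb.ondisk_icount);
	return 0;
}
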
12438c1903d3SDave Chinner /*
12448c1903d3SDave Chinner  * Deltas for the inode count are +/-64, hence we use a large batch size
12458c1903d3SDave Chinner  * of 128 so we don't need to take the counter lock on every update.
12468c1903d3SDave Chinner  */
12478c1903d3SDave Chinner #define XFS_ICOUNT_BATCH	128
1248501ab323SDave Chinner int
1249501ab323SDave Chinner xfs_mod_icount(
1250501ab323SDave Chinner 	struct xfs_mount	*mp,
1251501ab323SDave Chinner 	int64_t			delta)
1252501ab323SDave Chinner {
1253104b4e51SNikolay Borisov 	percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
12548c1903d3SDave Chinner 	if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
1255501ab323SDave Chinner 		ASSERT(0);
1256501ab323SDave Chinner 		percpu_counter_add(&mp->m_icount, -delta);
1257501ab323SDave Chinner 		return -EINVAL;
1258501ab323SDave Chinner 	}
1259501ab323SDave Chinner 	return 0;
1260501ab323SDave Chinner }
1261501ab323SDave Chinner 
1262e88b64eaSDave Chinner int
1263e88b64eaSDave Chinner xfs_mod_ifree(
1264e88b64eaSDave Chinner 	struct xfs_mount	*mp,
1265e88b64eaSDave Chinner 	int64_t			delta)
1266e88b64eaSDave Chinner {
1267e88b64eaSDave Chinner 	percpu_counter_add(&mp->m_ifree, delta);
1268e88b64eaSDave Chinner 	if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
1269e88b64eaSDave Chinner 		ASSERT(0);
1270e88b64eaSDave Chinner 		percpu_counter_add(&mp->m_ifree, -delta);
1271e88b64eaSDave Chinner 		return -EINVAL;
1272e88b64eaSDave Chinner 	}
1273e88b64eaSDave Chinner 	return 0;
1274e88b64eaSDave Chinner }
12750d485adaSDave Chinner 
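
/*
 * Editor's note: a single-threaded sketch (not the kernel's percpu_counter
 * implementation) of the batching idea behind XFS_ICOUNT_BATCH above.  Each
 * CPU accumulates deltas in a private slot and only folds them into the
 * shared total once the slot's magnitude reaches the batch size, so the
 * shared cacheline is touched rarely for small +/-64 updates.  All names
 * here are hypothetical; compile standalone.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_NR_CPUS	4
#define ICOUNT_BATCH	128

struct batched_counter {
	int64_t total;			/* shared, "expensive" to update */
	int64_t local[DEMO_NR_CPUS];	/* per-CPU deltas, cheap to update */
};

static void counter_add_batch(struct batched_counter *c, int cpu,
			      int64_t delta, int64_t batch)
{
	c->local[cpu] += delta;
	if (llabs(c->local[cpu]) >= batch) {	/* rare slow path */
		c->total += c->local[cpu];
		c->local[cpu] = 0;
	}
}

int main(void)
{
	struct batched_counter c = { 0 };
	int i;

	/* 64 allocations of a 64-inode chunk each, round-robin over CPUs */
	for (i = 0; i < 64; i++)
		counter_add_batch(&c, i % DEMO_NR_CPUS, 64, ICOUNT_BATCH);

	printf("shared total: %lld (exact only once locals are folded in)\n",
	       (long long)c.total);
	return 0;
}
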
12768c1903d3SDave Chinner /*
12778c1903d3SDave Chinner  * Deltas for the block count can vary from 1 to very large, but lock contention
12788c1903d3SDave Chinner  * only occurs on frequent small block count updates such as in the delayed
12798c1903d3SDave Chinner  * allocation path for buffered writes (a page at a time updates). Hence we set
12808c1903d3SDave Chinner  * a large batch count (1024) to minimise global counter updates except when
12818c1903d3SDave Chinner  * we get near to ENOSPC and we have to be very accurate with our updates.
12828c1903d3SDave Chinner  */
12838c1903d3SDave Chinner #define XFS_FDBLOCKS_BATCH	1024
12840d485adaSDave Chinner int
12850d485adaSDave Chinner xfs_mod_fdblocks(
12860d485adaSDave Chinner 	struct xfs_mount	*mp,
12870d485adaSDave Chinner 	int64_t			delta,
12880d485adaSDave Chinner 	bool			rsvd)
12890d485adaSDave Chinner {
12900d485adaSDave Chinner 	int64_t			lcounter;
12910d485adaSDave Chinner 	long long		res_used;
12920d485adaSDave Chinner 	s32			batch;
12930d485adaSDave Chinner 
12940d485adaSDave Chinner 	if (delta > 0) {
12950d485adaSDave Chinner 		/*
12960d485adaSDave Chinner 		 * If the reserve pool is depleted, put blocks back into it
12970d485adaSDave Chinner 		 * first. Most of the time the pool is full.
12980d485adaSDave Chinner 		 */
12990d485adaSDave Chinner 		if (likely(mp->m_resblks == mp->m_resblks_avail)) {
13000d485adaSDave Chinner 			percpu_counter_add(&mp->m_fdblocks, delta);
13010d485adaSDave Chinner 			return 0;
13020d485adaSDave Chinner 		}
13030d485adaSDave Chinner 
13040d485adaSDave Chinner 		spin_lock(&mp->m_sb_lock);
13050d485adaSDave Chinner 		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
13060d485adaSDave Chinner 
13070d485adaSDave Chinner 		if (res_used > delta) {
13080d485adaSDave Chinner 			mp->m_resblks_avail += delta;
13090d485adaSDave Chinner 		} else {
13100d485adaSDave Chinner 			delta -= res_used;
13110d485adaSDave Chinner 			mp->m_resblks_avail = mp->m_resblks;
13120d485adaSDave Chinner 			percpu_counter_add(&mp->m_fdblocks, delta);
13130d485adaSDave Chinner 		}
13140d485adaSDave Chinner 		spin_unlock(&mp->m_sb_lock);
13150d485adaSDave Chinner 		return 0;
13160d485adaSDave Chinner 	}
13170d485adaSDave Chinner 
13180d485adaSDave Chinner 	/*
13190d485adaSDave Chinner 	 * When taking blocks away, we need to be more accurate the closer we
13200d485adaSDave Chinner 	 * are to zero.
13210d485adaSDave Chinner 	 *
13220d485adaSDave Chinner 	 * If the counter has a value of less than 2 * max batch size,
13230d485adaSDave Chinner 	 * then make everything serialise as we are real close to
13240d485adaSDave Chinner 	 * ENOSPC.
13250d485adaSDave Chinner 	 */
13268c1903d3SDave Chinner 	if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
13278c1903d3SDave Chinner 				     XFS_FDBLOCKS_BATCH) < 0)
13280d485adaSDave Chinner 		batch = 1;
13290d485adaSDave Chinner 	else
13308c1903d3SDave Chinner 		batch = XFS_FDBLOCKS_BATCH;
13310d485adaSDave Chinner 
1332104b4e51SNikolay Borisov 	percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
133352548852SDarrick J. Wong 	if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
13348c1903d3SDave Chinner 				     XFS_FDBLOCKS_BATCH) >= 0) {
13350d485adaSDave Chinner 		/* we had space! */
13360d485adaSDave Chinner 		return 0;
13370d485adaSDave Chinner 	}
13380d485adaSDave Chinner 
13390d485adaSDave Chinner 	/*
13400d485adaSDave Chinner 	 * lock up the sb for dipping into reserves before releasing the space
13410d485adaSDave Chinner 	 * that took us to ENOSPC.
13420d485adaSDave Chinner 	 */
13430d485adaSDave Chinner 	spin_lock(&mp->m_sb_lock);
13440d485adaSDave Chinner 	percpu_counter_add(&mp->m_fdblocks, -delta);
13450d485adaSDave Chinner 	if (!rsvd)
13460d485adaSDave Chinner 		goto fdblocks_enospc;
13470d485adaSDave Chinner 
13480d485adaSDave Chinner 	lcounter = (long long)mp->m_resblks_avail + delta;
13490d485adaSDave Chinner 	if (lcounter >= 0) {
13500d485adaSDave Chinner 		mp->m_resblks_avail = lcounter;
13510d485adaSDave Chinner 		spin_unlock(&mp->m_sb_lock);
13520d485adaSDave Chinner 		return 0;
13530d485adaSDave Chinner 	}
13540d485adaSDave Chinner 	printk_once(KERN_WARNING
13550d485adaSDave Chinner 		"Filesystem \"%s\": reserve blocks depleted! "
13560d485adaSDave Chinner 		"Consider increasing reserve pool size.",
13570d485adaSDave Chinner 		mp->m_fsname);
13580d485adaSDave Chinner fdblocks_enospc:
13590d485adaSDave Chinner 	spin_unlock(&mp->m_sb_lock);
13600d485adaSDave Chinner 	return -ENOSPC;
13610d485adaSDave Chinner }
13620d485adaSDave Chinner 
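
/*
 * Editor's note: a simplified single-threaded model (not kernel code) of
 * the reserve-pool accounting in xfs_mod_fdblocks() above, with the percpu
 * counter replaced by a plain integer and the set-aside check reduced to a
 * comparison against zero.  Frees refill the reserve pool first; an
 * allocation that would go negative may dip into the pool, but only when
 * the caller passes rsvd == true (privileged transactions).  All names
 * here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ENOSPC	28	/* mirrors errno ENOSPC */

struct demo_mount {
	int64_t fdblocks;	/* free blocks available to everyone */
	int64_t resblks;	/* reserve pool size */
	int64_t resblks_avail;	/* reserve pool remaining */
};

static int mod_fdblocks(struct demo_mount *mp, int64_t delta, bool rsvd)
{
	if (delta > 0) {	/* freeing blocks: top up the pool first */
		int64_t res_used = mp->resblks - mp->resblks_avail;

		if (res_used >= delta) {
			mp->resblks_avail += delta;
		} else {
			mp->resblks_avail = mp->resblks;
			mp->fdblocks += delta - res_used;
		}
		return 0;
	}

	if (mp->fdblocks + delta >= 0) {	/* normal allocation */
		mp->fdblocks += delta;
		return 0;
	}
	if (rsvd && mp->resblks_avail + delta >= 0) {	/* dip into the pool */
		mp->resblks_avail += delta;
		return 0;
	}
	return -DEMO_ENOSPC;
}

int main(void)
{
	struct demo_mount mp = { .fdblocks = 8, .resblks = 16,
				 .resblks_avail = 16 };

	/* an ordinary allocation fails at ENOSPC... */
	printf("alloc 10, rsvd=0: %d\n", mod_fdblocks(&mp, -10, false));
	/* ...but a privileged one may consume the reserve pool */
	printf("alloc 10, rsvd=1: %d (resblks_avail=%lld)\n",
	       mod_fdblocks(&mp, -10, true), (long long)mp.resblks_avail);
	return 0;
}
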
" 13560d485adaSDave Chinner "Consider increasing reserve pool size.", 13570d485adaSDave Chinner mp->m_fsname); 13580d485adaSDave Chinner fdblocks_enospc: 13590d485adaSDave Chinner spin_unlock(&mp->m_sb_lock); 13600d485adaSDave Chinner return -ENOSPC; 13610d485adaSDave Chinner } 13620d485adaSDave Chinner 1363bab98bbeSDave Chinner int 1364bab98bbeSDave Chinner xfs_mod_frextents( 1365bab98bbeSDave Chinner struct xfs_mount *mp, 1366bab98bbeSDave Chinner int64_t delta) 1367bab98bbeSDave Chinner { 1368bab98bbeSDave Chinner int64_t lcounter; 1369bab98bbeSDave Chinner int ret = 0; 1370bab98bbeSDave Chinner 1371bab98bbeSDave Chinner spin_lock(&mp->m_sb_lock); 1372bab98bbeSDave Chinner lcounter = mp->m_sb.sb_frextents + delta; 1373bab98bbeSDave Chinner if (lcounter < 0) 1374bab98bbeSDave Chinner ret = -ENOSPC; 1375bab98bbeSDave Chinner else 1376bab98bbeSDave Chinner mp->m_sb.sb_frextents = lcounter; 1377bab98bbeSDave Chinner spin_unlock(&mp->m_sb_lock); 1378bab98bbeSDave Chinner return ret; 1379bab98bbeSDave Chinner } 1380bab98bbeSDave Chinner 13811da177e4SLinus Torvalds /* 13821da177e4SLinus Torvalds * xfs_getsb() is called to obtain the buffer for the superblock. 13831da177e4SLinus Torvalds * The buffer is returned locked and read in from disk. 13841da177e4SLinus Torvalds * The buffer should be released with a call to xfs_brelse(). 13851da177e4SLinus Torvalds * 13861da177e4SLinus Torvalds * If the flags parameter is BUF_TRYLOCK, then we'll only return 13871da177e4SLinus Torvalds * the superblock buffer if it can be locked without sleeping. 13881da177e4SLinus Torvalds * If it can't then we'll return NULL. 13891da177e4SLinus Torvalds */ 13900c842ad4SChristoph Hellwig struct xfs_buf * 13911da177e4SLinus Torvalds xfs_getsb( 13920c842ad4SChristoph Hellwig struct xfs_mount *mp, 13931da177e4SLinus Torvalds int flags) 13941da177e4SLinus Torvalds { 13950c842ad4SChristoph Hellwig struct xfs_buf *bp = mp->m_sb_bp; 13961da177e4SLinus Torvalds 13970c842ad4SChristoph Hellwig if (!xfs_buf_trylock(bp)) { 13980c842ad4SChristoph Hellwig if (flags & XBF_TRYLOCK) 13991da177e4SLinus Torvalds return NULL; 14000c842ad4SChristoph Hellwig xfs_buf_lock(bp); 14011da177e4SLinus Torvalds } 14020c842ad4SChristoph Hellwig 140372790aa1SChandra Seetharaman xfs_buf_hold(bp); 1404b0388bf1SDave Chinner ASSERT(bp->b_flags & XBF_DONE); 1405014c2544SJesper Juhl return bp; 14061da177e4SLinus Torvalds } 14071da177e4SLinus Torvalds 14081da177e4SLinus Torvalds /* 14091da177e4SLinus Torvalds * Used to free the superblock along various error paths. 14101da177e4SLinus Torvalds */ 14111da177e4SLinus Torvalds void 14121da177e4SLinus Torvalds xfs_freesb( 141326af6552SDave Chinner struct xfs_mount *mp) 14141da177e4SLinus Torvalds { 141526af6552SDave Chinner struct xfs_buf *bp = mp->m_sb_bp; 14161da177e4SLinus Torvalds 141726af6552SDave Chinner xfs_buf_lock(bp); 14181da177e4SLinus Torvalds mp->m_sb_bp = NULL; 141926af6552SDave Chinner xfs_buf_relse(bp); 14201da177e4SLinus Torvalds } 14211da177e4SLinus Torvalds 14221da177e4SLinus Torvalds /* 1423dda35b8fSChristoph Hellwig * If the underlying (data/log/rt) device is readonly, there are some 1424dda35b8fSChristoph Hellwig * operations that cannot proceed. 
14081da177e4SLinus Torvalds /*
14091da177e4SLinus Torvalds  * Used to free the superblock along various error paths.
14101da177e4SLinus Torvalds  */
14111da177e4SLinus Torvalds void
14121da177e4SLinus Torvalds xfs_freesb(
141326af6552SDave Chinner 	struct xfs_mount	*mp)
14141da177e4SLinus Torvalds {
141526af6552SDave Chinner 	struct xfs_buf		*bp = mp->m_sb_bp;
14161da177e4SLinus Torvalds 
141726af6552SDave Chinner 	xfs_buf_lock(bp);
14181da177e4SLinus Torvalds 	mp->m_sb_bp = NULL;
141926af6552SDave Chinner 	xfs_buf_relse(bp);
14201da177e4SLinus Torvalds }
14211da177e4SLinus Torvalds 
14221da177e4SLinus Torvalds /*
1423dda35b8fSChristoph Hellwig  * If the underlying (data/log/rt) device is readonly, there are some
1424dda35b8fSChristoph Hellwig  * operations that cannot proceed.
1425dda35b8fSChristoph Hellwig  */
1426dda35b8fSChristoph Hellwig int
1427dda35b8fSChristoph Hellwig xfs_dev_is_read_only(
1428dda35b8fSChristoph Hellwig 	struct xfs_mount	*mp,
1429dda35b8fSChristoph Hellwig 	char			*message)
1430dda35b8fSChristoph Hellwig {
1431dda35b8fSChristoph Hellwig 	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1432dda35b8fSChristoph Hellwig 	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
1433dda35b8fSChristoph Hellwig 	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
14340b932cccSDave Chinner 		xfs_notice(mp, "%s required on read-only device.", message);
14350b932cccSDave Chinner 		xfs_notice(mp, "write access unavailable, cannot proceed.");
14362451337dSDave Chinner 		return -EROFS;
1437dda35b8fSChristoph Hellwig 	}
1438dda35b8fSChristoph Hellwig 	return 0;
1439dda35b8fSChristoph Hellwig }
1440f467cad9SDarrick J. Wong 
1441f467cad9SDarrick J. Wong /* Force the summary counters to be recalculated at next mount. */
1442f467cad9SDarrick J. Wong void
1443f467cad9SDarrick J. Wong xfs_force_summary_recalc(
1444f467cad9SDarrick J. Wong 	struct xfs_mount	*mp)
1445f467cad9SDarrick J. Wong {
1446f467cad9SDarrick J. Wong 	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1447f467cad9SDarrick J. Wong 		return;
1448f467cad9SDarrick J. Wong 
1449f467cad9SDarrick J. Wong 	spin_lock(&mp->m_sb_lock);
1450f467cad9SDarrick J. Wong 	mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;
1451f467cad9SDarrick J. Wong 	spin_unlock(&mp->m_sb_lock);
1452f467cad9SDarrick J. Wong }
1453
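
/*
 * Editor's note: a standalone sketch (not kernel code) of the multi-device
 * gate in xfs_dev_is_read_only() above.  An XFS filesystem can span a data
 * device, an (optionally external) log device and a realtime device; a
 * write-requiring operation must be refused if any device that is actually
 * present is read-only.  The structs and demo "targets" below are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_EROFS	30	/* mirrors errno EROFS */

struct demo_target { bool present; bool readonly; };

struct demo_mount {
	struct demo_target data, log, rt;
};

static int dev_is_read_only(const struct demo_mount *mp, const char *op)
{
	if (mp->data.readonly || mp->log.readonly ||
	    (mp->rt.present && mp->rt.readonly)) {
		fprintf(stderr, "%s required on read-only device.\n", op);
		return -DEMO_EROFS;
	}
	return 0;
}

int main(void)
{
	struct demo_mount mp = {
		.data = { .present = true, .readonly = false },
		.log  = { .present = true, .readonly = true },	/* RO external log */
		.rt   = { .present = false },
	};

	/* refused: the external log device cannot be written */
	printf("log recovery writes: %d\n",
	       dev_is_read_only(&mp, "recovery"));
	return 0;
}
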