/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_dinode.h"


#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
						int);
STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else

#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
#endif

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_nil(uuid)) {
		xfs_warn(mp, "Filesystem has nil UUID - can't mount");
		return XFS_ERROR(EINVAL);
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = kmem_realloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			xfs_uuid_table_size  * sizeof(*xfs_uuid_table),
			KM_SLEEP);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return XFS_ERROR(EINVAL);
}

STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_nil(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}


STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	__uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return EFBIG;
#else			/* Limited by UINT_MAX of sectors */
	if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
		return EFBIG;
#endif
	return 0;
}

int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = 0;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}
		if (!first_initialised)
			first_initialised = index;

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag)
			goto out_unwind;
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		mutex_init(&pag->pag_ici_reclaim_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		spin_lock_init(&pag->pag_buf_lock);
		pag->pag_buf_tree = RB_ROOT;

		if (radix_tree_preload(GFP_NOFS))
			goto out_unwind;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			BUG();
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_unwind;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
	}

	/*
	 * If we mount with the inode64 option, or if no inode overflows
	 * the legacy 32-bit address space, clear the inode32 option.
	 */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	if (mp->m_flags & XFS_MOUNT_32BITINODES)
		index = xfs_set_inode32(mp);
	else
		index = xfs_set_inode64(mp);

	if (maxagi)
		*maxagi = index;
	return 0;

out_unwind:
	kmem_free(pag);
	for (; index > first_initialised; index--) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		kmem_free(pag);
	}
	return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device.  It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
reread:
	bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				   BTOBB(sector_size), 0, buf_ops);
	if (!bp) {
		if (loud)
			xfs_warn(mp, "SB buffer read failed");
		return EIO;
	}
	if (bp->b_error) {
		error = bp->b_error;
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == EFSBADCRC)
			error = EFSCORRUPTED;
		goto release_buf;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
	xfs_sb_quota_from_disk(&mp->m_sb);

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = ENOSYS;
		goto release_buf;
	}

	/*
	 * Re-read the superblock so the buffer is correctly sized,
	 * and properly verified.
	 */
	if (buf_ops == NULL) {
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	/* Initialize per-cpu counters */
	xfs_icsb_reinit_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);

	if (mp->m_dalign) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			xfs_warn(mp,
		"alignment check failed: sunit/swidth vs. blocksize(%d)",
				sbp->sb_blocksize);
			return XFS_ERROR(EINVAL);
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				xfs_warn(mp,
			"alignment check failed: sunit/swidth vs. agsize(%d)",
					sbp->sb_agblocks);
				return XFS_ERROR(EINVAL);
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				xfs_warn(mp,
			"alignment check failed: sunit(%d) less than bsize(%d)",
					mp->m_dalign, sbp->sb_blocksize);
				return XFS_ERROR(EINVAL);
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (xfs_sb_version_hasdalign(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				mp->m_update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				mp->m_update_flags |= XFS_SB_WIDTH;
			}
		} else {
			xfs_warn(mp,
	"cannot change alignment: superblock does not support data alignment");
			return XFS_ERROR(EINVAL);
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    xfs_sb_version_hasdalign(&mp->m_sb)) {
			mp->m_dalign = sbp->sb_unit;
			mp->m_swidth = sbp->sb_width;
	}

	return 0;
}

/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	__uint64_t	icount;

	if (sbp->sb_imax_pct) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks)  <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}
}

/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	int		readio_log, writeio_log;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}

/*
 * precalculate the low space thresholds for dynamic speculative preallocation.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		__uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}


/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(xfs_mount_t *mp)
{
	xfs_buf_t	*bp;
	xfs_daddr_t	d;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return XFS_ERROR(EFBIG);
	}
	bp = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, NULL);
	if (!bp) {
		xfs_warn(mp, "last sector read failed");
		return EIO;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			xfs_warn(mp, "log size mismatch detected");
			return XFS_ERROR(EFBIG);
		}
		bp = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, NULL);
		if (!bp) {
			xfs_warn(mp, "log device read failed");
			return EIO;
		}
		xfs_buf_relse(bp);
	}
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	int			error;
	struct xfs_trans	*tp;

	mp->m_qflags = 0;

	/*
	 * It is OK to look at sb_qflags here in mount path,
	 * without m_sb_lock.
	 */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * If the fs is readonly, let the incore superblock run
	 * with quotas off but don't flush the update out to disk
	 */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_alert(mp, "%s: Superblock update failed!", __func__);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_QFLAGS);
	return xfs_trans_commit(tp, 0);
}

__uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	__uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller.  This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(__uint64_t, resblks, 8192);
	return resblks;
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	xfs_mount_t	*mp)
{
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	__uint64_t	resblks;
	uint		quotamount = 0;
	uint		quotaflags = 0;
	int		error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for a mismatched features2 values.  Older kernels
	 * read & wrote into the wrong sb offset for sb_features2
	 * on some platforms due to xfs_sb_t not being 64bit size aligned
	 * when sb_features2 was added, which made older superblock
	 * reading/writing routines swap it as a 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the
	 * existing features2 field in case it has already been modified; we
	 * don't want to lose any features.  We then update the bad location
	 * with the ORed value so that older kernels will see any features2
	 * flags, and mark the two fields as needing updates once the
	 * transaction subsystem is online.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		sbp->sb_bad_features2 = sbp->sb_features2;
		mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		   !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	   (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_flags |= XFS_SB_FEATURES2;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_flags |= XFS_SB_VERSIONNUM;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_update_flags |= XFS_SB_VERSIONNUM;
	}

	/*
	 * Check if sb_agblocks is aligned at stripe boundary
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	xfs_set_maxicount(mp);

	error = xfs_uuid_mount(mp);
	if (error)
		goto out;

	/*
	 * Set the minimum read and write sizes
	 */
	xfs_set_rw_sizes(mp);

	/* set the low space thresholds for dynamic preallocation */
	xfs_set_low_space_thresholds(mp);

	/*
	 * Set the inode cluster size.
	 * This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 *
	 * For v5 filesystems, scale the cluster size with the inode size to
	 * keep a constant ratio of inode per cluster buffer, but only if mkfs
	 * has set the inode alignment value appropriately for larger cluster
	 * sizes.
	 */
	mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		int	new_size = mp->m_inode_cluster_size;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			mp->m_inode_cluster_size = new_size;
	}

	/*
	 * Set inode alignment fields
	 */
	xfs_set_inoalignment(mp);

	/*
	 * Check that the data (and log if separate) is an ok size.
	 */
	error = xfs_check_sizes(mp);
	if (error)
		goto out_remove_uuid;

	/*
	 * Initialize realtime fields in the mount structure
	 */
	error = xfs_rtmount_init(mp);
	if (error) {
		xfs_warn(mp, "RT mount failed");
		goto out_remove_uuid;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	xfs_dir_mount(mp);

	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	spin_lock_init(&mp->m_perag_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
	if (error) {
		xfs_warn(mp, "Failed per-ag init: %d", error);
		goto out_remove_uuid;
	}

	if (!sbp->sb_logblocks) {
		xfs_warn(mp, "no log defined");
		XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_free_perag;
	}

	/*
	 * log's mount-time initialization. Perform 1st part recovery if needed
	 */
	error = xfs_log_mount(mp, mp->m_logdev_targp,
			      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
			      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
	if (error) {
		xfs_warn(mp, "log mount failed");
		goto out_fail_wait;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, then once the first phase of recovery has completed,
	 * we have consistent AG blocks on disk. We have not recovered EFIs yet,
	 * but they are recovered transactionally in the second recovery phase
	 * later.
	 *
	 * Hence we can safely re-initialise incore superblock counters from
	 * the per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we need to wait for recovery to finish before
	 * doing this.
	 *
	 * If the filesystem was cleanly unmounted, then we can trust the
	 * values in the superblock to be correct and we don't need to do
	 * anything here.
	 *
	 * If we are currently making the filesystem, the initialisation will
	 * fail as the perag data is in an undefined state.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
	    !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	     !mp->m_sb.sb_inprogress) {
		error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
		if (error)
			goto out_fail_wait;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
	if (error) {
		xfs_warn(mp, "failed to read root inode");
		goto out_log_dealloc;
	}

	ASSERT(rip != NULL);

	if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
		xfs_warn(mp, "corrupted root inode %llu: not a directory",
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_rele_rip;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	error = xfs_rtmount_inodes(mp);
	if (error) {
		/*
		 * Free up the root inode.
		 */
		xfs_warn(mp, "failed to read RT inodes");
		goto out_rele_rip;
	}

	/*
	 * If this is a read-only mount defer the superblock updates until
	 * the next remount into writeable mode.  Otherwise we would never
	 * perform the update e.g. for the root filesystem.
	 */
	if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		error = xfs_mount_log_sb(mp, mp->m_update_flags);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			goto out_rtunmount;
		}
	}

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
		if (error)
			goto out_rtunmount;
	} else {
		ASSERT(!XFS_IS_QUOTA_ON(mp));

		/*
		 * If a file system had quotas running earlier, but decided to
		 * mount without -o uquota/pquota/gquota options, revoke the
		 * quotachecked license.
		 */
		if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
			xfs_notice(mp, "resetting quota flags");
			error = xfs_mount_reset_sbqflags(mp);
			if (error)
				return error;
		}
	}

	/*
	 * Finish recovering the file system.  This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp);
	if (error) {
		xfs_warn(mp, "log mount finish failed");
		goto out_rtunmount;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if (quotamount) {
		ASSERT(mp->m_qflags == 0);
		mp->m_qflags = quotaflags;

		xfs_qm_mount_quotas(mp);
	}

	/*
	 * Now we are mounted, reserve a small amount of unused space for
	 * privileged transactions. This is needed so that transaction
	 * space required for critical operations can dip into this pool
	 * when at ENOSPC. This is needed for operations like create with
	 * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
	 * are not allowed to use this reserved space.
	 *
	 * This may drive us straight to ENOSPC on mount, but that implies
	 * we were already there on the last unmount. Warn if this occurs.
	 */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		resblks = xfs_default_resblks(mp);
		error = xfs_reserve_blocks(mp, &resblks, NULL);
		if (error)
			xfs_warn(mp,
	"Unable to allocate reserve blocks. Continuing without reserve pool.");
	}

	return 0;

 out_rtunmount:
	xfs_rtunmount_inodes(mp);
 out_rele_rip:
	IRELE(rip);
 out_log_dealloc:
	xfs_log_unmount(mp);
 out_fail_wait:
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
 out_free_perag:
	xfs_free_perag(mp);
 out_remove_uuid:
	xfs_uuid_unmount(mp);
 out:
	return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	__uint64_t		resblks;
	int			error;

	cancel_delayed_work_sync(&mp->m_eofblocks_work);

	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	IRELE(mp->m_rootip);

	/*
	 * We can potentially deadlock here if we have an inode cluster
	 * that has been freed but has its buffer still pinned in memory
	 * because the transaction is still sitting in an iclog. The stale
	 * inodes on that buffer will have their flush locks held until the
	 * transaction hits the disk and the callbacks run. The inode
	 * flush takes the flush lock unconditionally and with nothing to
	 * push out the iclog we will never get that unlocked. Hence we
	 * need to force the log first.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * Flush all pending changes from the AIL.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * And reclaim all inodes.  At this point there should be no dirty
	 * inodes and none should be pinned or locked, but use synchronous
	 * reclaim just to be sure. We can stop background inode reclaim
	 * here as well if it is still running.
	 */
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't account
	 * the reserved free space as used. This is really only necessary for
	 * lazy superblock counting because it trusts the incore superblock
	 * counters to be absolutely correct on clean unmount.
	 *
	 * We don't bother correcting this elsewhere for lazy superblock
	 * counting because on mount of an unclean filesystem we reconstruct the
	 * correct counter value and this is irrelevant.
	 *
	 * For non-lazy counter filesystems, this doesn't matter at all because
	 * we only ever apply deltas to the superblock and hence the incore
	 * value does not matter....
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "Unable to update superblock counters. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp, 0);
#endif
	xfs_free_perag(mp);
}

int
xfs_fs_writable(xfs_mount_t *mp)
{
	return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
		(mp->m_flags & XFS_MOUNT_RDONLY));
}

/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so
 * we may need to use the transaction allocator which does not
 * block when the transaction subsystem is in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
	xfs_trans_t	*tp;
	int		error;

	if (!xfs_fs_writable(mp))
		return 0;

	xfs_icsb_sync_counters(mp, 0);

	/*
	 * we don't need to do this if we are updating the superblock
	 * counters on every modification.
	 */
	if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
		return 0;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return error;
}

/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The m_sb_lock must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int64_t		delta,
	int		rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field.  Apply the delta to the
	 * proper field.  If the field's value would dip below
	 * 0, then do not apply the delta and return EINVAL.
11351da177e4SLinus Torvalds */ 11361da177e4SLinus Torvalds switch (field) { 11371da177e4SLinus Torvalds case XFS_SBS_ICOUNT: 11381da177e4SLinus Torvalds lcounter = (long long)mp->m_sb.sb_icount; 11391da177e4SLinus Torvalds lcounter += delta; 11401da177e4SLinus Torvalds if (lcounter < 0) { 11411da177e4SLinus Torvalds ASSERT(0); 1142014c2544SJesper Juhl return XFS_ERROR(EINVAL); 11431da177e4SLinus Torvalds } 11441da177e4SLinus Torvalds mp->m_sb.sb_icount = lcounter; 1145014c2544SJesper Juhl return 0; 11461da177e4SLinus Torvalds case XFS_SBS_IFREE: 11471da177e4SLinus Torvalds lcounter = (long long)mp->m_sb.sb_ifree; 11481da177e4SLinus Torvalds lcounter += delta; 11491da177e4SLinus Torvalds if (lcounter < 0) { 11501da177e4SLinus Torvalds ASSERT(0); 1151014c2544SJesper Juhl return XFS_ERROR(EINVAL); 11521da177e4SLinus Torvalds } 11531da177e4SLinus Torvalds mp->m_sb.sb_ifree = lcounter; 1154014c2544SJesper Juhl return 0; 11551da177e4SLinus Torvalds case XFS_SBS_FDBLOCKS: 11564be536deSDavid Chinner lcounter = (long long) 11574be536deSDavid Chinner mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 11581da177e4SLinus Torvalds res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); 11591da177e4SLinus Torvalds 11601da177e4SLinus Torvalds if (delta > 0) { /* Putting blocks back */ 11611da177e4SLinus Torvalds if (res_used > delta) { 11621da177e4SLinus Torvalds mp->m_resblks_avail += delta; 11631da177e4SLinus Torvalds } else { 11641da177e4SLinus Torvalds rem = delta - res_used; 11651da177e4SLinus Torvalds mp->m_resblks_avail = mp->m_resblks; 11661da177e4SLinus Torvalds lcounter += rem; 11671da177e4SLinus Torvalds } 11681da177e4SLinus Torvalds } else { /* Taking blocks away */ 11691da177e4SLinus Torvalds lcounter += delta; 11708babd8a2SDave Chinner if (lcounter >= 0) { 11718babd8a2SDave Chinner mp->m_sb.sb_fdblocks = lcounter + 11728babd8a2SDave Chinner XFS_ALLOC_SET_ASIDE(mp); 11738babd8a2SDave Chinner return 0; 11748babd8a2SDave Chinner } 11751da177e4SLinus Torvalds 11761da177e4SLinus Torvalds /* 11778babd8a2SDave Chinner * We are out of blocks, use any available reserved 11788babd8a2SDave Chinner * blocks if we're allowed to. 11791da177e4SLinus Torvalds */ 11808babd8a2SDave Chinner if (!rsvd) 1181014c2544SJesper Juhl return XFS_ERROR(ENOSPC); 11828babd8a2SDave Chinner 11838babd8a2SDave Chinner lcounter = (long long)mp->m_resblks_avail + delta; 11848babd8a2SDave Chinner if (lcounter >= 0) { 11851da177e4SLinus Torvalds mp->m_resblks_avail = lcounter; 1186014c2544SJesper Juhl return 0; 11878babd8a2SDave Chinner } 11888babd8a2SDave Chinner printk_once(KERN_WARNING 11898babd8a2SDave Chinner "Filesystem \"%s\": reserve blocks depleted!
" 11908babd8a2SDave Chinner "Consider increasing reserve pool size.", 11918babd8a2SDave Chinner mp->m_fsname); 1192014c2544SJesper Juhl return XFS_ERROR(ENOSPC); 11931da177e4SLinus Torvalds } 11941da177e4SLinus Torvalds 11954be536deSDavid Chinner mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); 1196014c2544SJesper Juhl return 0; 11971da177e4SLinus Torvalds case XFS_SBS_FREXTENTS: 11981da177e4SLinus Torvalds lcounter = (long long)mp->m_sb.sb_frextents; 11991da177e4SLinus Torvalds lcounter += delta; 12001da177e4SLinus Torvalds if (lcounter < 0) { 1201014c2544SJesper Juhl return XFS_ERROR(ENOSPC); 12021da177e4SLinus Torvalds } 12031da177e4SLinus Torvalds mp->m_sb.sb_frextents = lcounter; 1204014c2544SJesper Juhl return 0; 12051da177e4SLinus Torvalds case XFS_SBS_DBLOCKS: 12061da177e4SLinus Torvalds lcounter = (long long)mp->m_sb.sb_dblocks; 12071da177e4SLinus Torvalds lcounter += delta; 12081da177e4SLinus Torvalds if (lcounter < 0) { 12091da177e4SLinus Torvalds ASSERT(0); 1210014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12111da177e4SLinus Torvalds } 12121da177e4SLinus Torvalds mp->m_sb.sb_dblocks = lcounter; 1213014c2544SJesper Juhl return 0; 12141da177e4SLinus Torvalds case XFS_SBS_AGCOUNT: 12151da177e4SLinus Torvalds scounter = mp->m_sb.sb_agcount; 12161da177e4SLinus Torvalds scounter += delta; 12171da177e4SLinus Torvalds if (scounter < 0) { 12181da177e4SLinus Torvalds ASSERT(0); 1219014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12201da177e4SLinus Torvalds } 12211da177e4SLinus Torvalds mp->m_sb.sb_agcount = scounter; 1222014c2544SJesper Juhl return 0; 12231da177e4SLinus Torvalds case XFS_SBS_IMAX_PCT: 12241da177e4SLinus Torvalds scounter = mp->m_sb.sb_imax_pct; 12251da177e4SLinus Torvalds scounter += delta; 12261da177e4SLinus Torvalds if (scounter < 0) { 12271da177e4SLinus Torvalds ASSERT(0); 1228014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12291da177e4SLinus Torvalds } 12301da177e4SLinus Torvalds mp->m_sb.sb_imax_pct = scounter; 1231014c2544SJesper Juhl return 0; 12321da177e4SLinus Torvalds case XFS_SBS_REXTSIZE: 12331da177e4SLinus Torvalds scounter = mp->m_sb.sb_rextsize; 12341da177e4SLinus Torvalds scounter += delta; 12351da177e4SLinus Torvalds if (scounter < 0) { 12361da177e4SLinus Torvalds ASSERT(0); 1237014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12381da177e4SLinus Torvalds } 12391da177e4SLinus Torvalds mp->m_sb.sb_rextsize = scounter; 1240014c2544SJesper Juhl return 0; 12411da177e4SLinus Torvalds case XFS_SBS_RBMBLOCKS: 12421da177e4SLinus Torvalds scounter = mp->m_sb.sb_rbmblocks; 12431da177e4SLinus Torvalds scounter += delta; 12441da177e4SLinus Torvalds if (scounter < 0) { 12451da177e4SLinus Torvalds ASSERT(0); 1246014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12471da177e4SLinus Torvalds } 12481da177e4SLinus Torvalds mp->m_sb.sb_rbmblocks = scounter; 1249014c2544SJesper Juhl return 0; 12501da177e4SLinus Torvalds case XFS_SBS_RBLOCKS: 12511da177e4SLinus Torvalds lcounter = (long long)mp->m_sb.sb_rblocks; 12521da177e4SLinus Torvalds lcounter += delta; 12531da177e4SLinus Torvalds if (lcounter < 0) { 12541da177e4SLinus Torvalds ASSERT(0); 1255014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12561da177e4SLinus Torvalds } 12571da177e4SLinus Torvalds mp->m_sb.sb_rblocks = lcounter; 1258014c2544SJesper Juhl return 0; 12591da177e4SLinus Torvalds case XFS_SBS_REXTENTS: 12601da177e4SLinus Torvalds lcounter = (long long)mp->m_sb.sb_rextents; 12611da177e4SLinus Torvalds lcounter += delta; 12621da177e4SLinus Torvalds if (lcounter < 0) { 12631da177e4SLinus Torvalds ASSERT(0); 
1264014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12651da177e4SLinus Torvalds } 12661da177e4SLinus Torvalds mp->m_sb.sb_rextents = lcounter; 1267014c2544SJesper Juhl return 0; 12681da177e4SLinus Torvalds case XFS_SBS_REXTSLOG: 12691da177e4SLinus Torvalds scounter = mp->m_sb.sb_rextslog; 12701da177e4SLinus Torvalds scounter += delta; 12711da177e4SLinus Torvalds if (scounter < 0) { 12721da177e4SLinus Torvalds ASSERT(0); 1273014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12741da177e4SLinus Torvalds } 12751da177e4SLinus Torvalds mp->m_sb.sb_rextslog = scounter; 1276014c2544SJesper Juhl return 0; 12771da177e4SLinus Torvalds default: 12781da177e4SLinus Torvalds ASSERT(0); 1279014c2544SJesper Juhl return XFS_ERROR(EINVAL); 12801da177e4SLinus Torvalds } 12811da177e4SLinus Torvalds } 12821da177e4SLinus Torvalds 12831da177e4SLinus Torvalds /* 12841da177e4SLinus Torvalds * xfs_mod_incore_sb() is used to change a field in the in-core 12851da177e4SLinus Torvalds * superblock structure by the specified delta. This modification 12863685c2a1SEric Sandeen * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked() 12871da177e4SLinus Torvalds * routine to do the work. 12881da177e4SLinus Torvalds */ 12891da177e4SLinus Torvalds int 129020f4ebf2SDavid Chinner xfs_mod_incore_sb( 129196540c78SChristoph Hellwig struct xfs_mount *mp, 129220f4ebf2SDavid Chinner xfs_sb_field_t field, 129320f4ebf2SDavid Chinner int64_t delta, 129420f4ebf2SDavid Chinner int rsvd) 12951da177e4SLinus Torvalds { 12961da177e4SLinus Torvalds int status; 12971da177e4SLinus Torvalds 12988d280b98SDavid Chinner #ifdef HAVE_PERCPU_SB 129996540c78SChristoph Hellwig ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS); 13008d280b98SDavid Chinner #endif 13013685c2a1SEric Sandeen spin_lock(&mp->m_sb_lock); 13021da177e4SLinus Torvalds status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 13033685c2a1SEric Sandeen spin_unlock(&mp->m_sb_lock); 13048d280b98SDavid Chinner 1305014c2544SJesper Juhl return status; 13061da177e4SLinus Torvalds } 13071da177e4SLinus Torvalds 13081da177e4SLinus Torvalds /* 13091b040712SChristoph Hellwig * Change more than one field in the in-core superblock structure at a time. 13101da177e4SLinus Torvalds * 13111b040712SChristoph Hellwig * The fields and changes to those fields are specified in the array of 13121b040712SChristoph Hellwig * xfs_mod_sb structures passed in. Either all of the specified deltas 13131b040712SChristoph Hellwig * will be applied or none of them will. If any modified field dips below 0, 13141b040712SChristoph Hellwig * then all modifications will be backed out and EINVAL will be returned. 13151b040712SChristoph Hellwig * 13161b040712SChristoph Hellwig * Note that this function may not be used for the superblock values that 13171b040712SChristoph Hellwig * are tracked with the in-memory per-cpu counters - a direct call to 13181b040712SChristoph Hellwig * xfs_icsb_modify_counters is required for these. 
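 *
 * A minimal usage sketch (hypothetical caller, not taken from this code;
 * only the xfs_mod_sb_t members and the XFS_SBS_* field values below come
 * from this file, the delta variable names are invented):
 *
 *        xfs_mod_sb_t    msb[2];
 *        int             error;
 *
 *        msb[0].msb_field = XFS_SBS_AGCOUNT;
 *        msb[0].msb_delta = agcount_delta;
 *        msb[1].msb_field = XFS_SBS_DBLOCKS;
 *        msb[1].msb_delta = dblocks_delta;
 *        error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);
 *
 * Either both deltas are applied under m_sb_lock or, if either would drive
 * its field below zero, both are backed out and EINVAL is returned.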
13191da177e4SLinus Torvalds */ 13201da177e4SLinus Torvalds int 13211b040712SChristoph Hellwig xfs_mod_incore_sb_batch( 13221b040712SChristoph Hellwig struct xfs_mount *mp, 13231b040712SChristoph Hellwig xfs_mod_sb_t *msb, 13241b040712SChristoph Hellwig uint nmsb, 13251b040712SChristoph Hellwig int rsvd) 13261da177e4SLinus Torvalds { 132745c51b99SDavid Sterba xfs_mod_sb_t *msbp; 13281b040712SChristoph Hellwig int error = 0; 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds /* 13311b040712SChristoph Hellwig * Loop through the array of mod structures and apply each individually. 13321b040712SChristoph Hellwig * If any fail, then back out all those which have already been applied. 13331b040712SChristoph Hellwig * Do all of this within the scope of the m_sb_lock so that all of the 13341b040712SChristoph Hellwig * changes will be atomic. 13351da177e4SLinus Torvalds */ 13363685c2a1SEric Sandeen spin_lock(&mp->m_sb_lock); 133745c51b99SDavid Sterba for (msbp = msb; msbp < (msb + nmsb); msbp++) { 13381b040712SChristoph Hellwig ASSERT(msbp->msb_field < XFS_SBS_ICOUNT || 13391b040712SChristoph Hellwig msbp->msb_field > XFS_SBS_FDBLOCKS); 13408d280b98SDavid Chinner 13411b040712SChristoph Hellwig error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, 13421b040712SChristoph Hellwig msbp->msb_delta, rsvd); 13431b040712SChristoph Hellwig if (error) 13441b040712SChristoph Hellwig goto unwind; 13451da177e4SLinus Torvalds } 13461b040712SChristoph Hellwig spin_unlock(&mp->m_sb_lock); 13471b040712SChristoph Hellwig return 0; 13481da177e4SLinus Torvalds 13491b040712SChristoph Hellwig unwind: 13501b040712SChristoph Hellwig while (--msbp >= msb) { 13511b040712SChristoph Hellwig error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, 13521b040712SChristoph Hellwig -msbp->msb_delta, rsvd); 13531b040712SChristoph Hellwig ASSERT(error == 0); 13541da177e4SLinus Torvalds } 13553685c2a1SEric Sandeen spin_unlock(&mp->m_sb_lock); 13561b040712SChristoph Hellwig return error; 13571da177e4SLinus Torvalds } 13581da177e4SLinus Torvalds 13591da177e4SLinus Torvalds /* 13601da177e4SLinus Torvalds * xfs_getsb() is called to obtain the buffer for the superblock. 13611da177e4SLinus Torvalds * The buffer is returned locked and read in from disk. 13621da177e4SLinus Torvalds * The buffer should be released with a call to xfs_buf_relse(). 13631da177e4SLinus Torvalds * 13641da177e4SLinus Torvalds * If the flags parameter is XBF_TRYLOCK, then we'll only return 13651da177e4SLinus Torvalds * the superblock buffer if it can be locked without sleeping. 13661da177e4SLinus Torvalds * If it can't then we'll return NULL. 13671da177e4SLinus Torvalds */ 13680c842ad4SChristoph Hellwig struct xfs_buf * 13691da177e4SLinus Torvalds xfs_getsb( 13700c842ad4SChristoph Hellwig struct xfs_mount *mp, 13711da177e4SLinus Torvalds int flags) 13721da177e4SLinus Torvalds { 13730c842ad4SChristoph Hellwig struct xfs_buf *bp = mp->m_sb_bp; 13741da177e4SLinus Torvalds 13750c842ad4SChristoph Hellwig if (!xfs_buf_trylock(bp)) { 13760c842ad4SChristoph Hellwig if (flags & XBF_TRYLOCK) 13771da177e4SLinus Torvalds return NULL; 13780c842ad4SChristoph Hellwig xfs_buf_lock(bp); 13791da177e4SLinus Torvalds } 13800c842ad4SChristoph Hellwig 138172790aa1SChandra Seetharaman xfs_buf_hold(bp); 13821da177e4SLinus Torvalds ASSERT(XFS_BUF_ISDONE(bp)); 1383014c2544SJesper Juhl return bp; 13841da177e4SLinus Torvalds } 13851da177e4SLinus Torvalds 13861da177e4SLinus Torvalds /* 13871da177e4SLinus Torvalds * Used to free the superblock along various error paths
13881da177e4SLinus Torvalds */ 13891da177e4SLinus Torvalds void 13901da177e4SLinus Torvalds xfs_freesb( 139126af6552SDave Chinner struct xfs_mount *mp) 13921da177e4SLinus Torvalds { 139326af6552SDave Chinner struct xfs_buf *bp = mp->m_sb_bp; 13941da177e4SLinus Torvalds 139526af6552SDave Chinner xfs_buf_lock(bp); 13961da177e4SLinus Torvalds mp->m_sb_bp = NULL; 139726af6552SDave Chinner xfs_buf_relse(bp); 13981da177e4SLinus Torvalds } 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds /* 14011da177e4SLinus Torvalds * Used to log changes to the superblock unit and width fields which could 1402e6957ea4SEric Sandeen * be altered by the mount options, as well as any potential sb_features2 1403e6957ea4SEric Sandeen * fixup. Only the first superblock is updated. 14041da177e4SLinus Torvalds */ 14057884bc86SChristoph Hellwig int 1406ee1c0908SDavid Chinner xfs_mount_log_sb( 14071da177e4SLinus Torvalds xfs_mount_t *mp, 14081da177e4SLinus Torvalds __int64_t fields) 14091da177e4SLinus Torvalds { 14101da177e4SLinus Torvalds xfs_trans_t *tp; 1411e5720eecSDavid Chinner int error; 14121da177e4SLinus Torvalds 1413ee1c0908SDavid Chinner ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | 14144b166de0SDavid Chinner XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 | 14154b166de0SDavid Chinner XFS_SB_VERSIONNUM)); 14161da177e4SLinus Torvalds 14171da177e4SLinus Torvalds tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); 14183d3c8b52SJie Liu error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0); 1419e5720eecSDavid Chinner if (error) { 14201da177e4SLinus Torvalds xfs_trans_cancel(tp, 0); 1421e5720eecSDavid Chinner return error; 14221da177e4SLinus Torvalds } 14231da177e4SLinus Torvalds xfs_mod_sb(tp, fields); 1424e5720eecSDavid Chinner error = xfs_trans_commit(tp, 0); 1425e5720eecSDavid Chinner return error; 14261da177e4SLinus Torvalds } 14278d280b98SDavid Chinner 1428dda35b8fSChristoph Hellwig /* 1429dda35b8fSChristoph Hellwig * If the underlying (data/log/rt) device is readonly, there are some 1430dda35b8fSChristoph Hellwig * operations that cannot proceed. 1431dda35b8fSChristoph Hellwig */ 1432dda35b8fSChristoph Hellwig int 1433dda35b8fSChristoph Hellwig xfs_dev_is_read_only( 1434dda35b8fSChristoph Hellwig struct xfs_mount *mp, 1435dda35b8fSChristoph Hellwig char *message) 1436dda35b8fSChristoph Hellwig { 1437dda35b8fSChristoph Hellwig if (xfs_readonly_buftarg(mp->m_ddev_targp) || 1438dda35b8fSChristoph Hellwig xfs_readonly_buftarg(mp->m_logdev_targp) || 1439dda35b8fSChristoph Hellwig (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { 14400b932cccSDave Chinner xfs_notice(mp, "%s required on read-only device.", message); 14410b932cccSDave Chinner xfs_notice(mp, "write access unavailable, cannot proceed."); 1442dda35b8fSChristoph Hellwig return EROFS; 1443dda35b8fSChristoph Hellwig } 1444dda35b8fSChristoph Hellwig return 0; 1445dda35b8fSChristoph Hellwig } 14468d280b98SDavid Chinner 14478d280b98SDavid Chinner #ifdef HAVE_PERCPU_SB 14488d280b98SDavid Chinner /* 14498d280b98SDavid Chinner * Per-cpu incore superblock counters 14508d280b98SDavid Chinner * 14518d280b98SDavid Chinner * Simple concept, difficult implementation 14528d280b98SDavid Chinner * 14538d280b98SDavid Chinner * Basically, replace the incore superblock counters with a distributed per cpu 14548d280b98SDavid Chinner * counter for contended fields (e.g. free block count). 
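 *
 * Put loosely, while a counter is enabled its authoritative value is the sum
 * of the per-cpu pieces, e.g. for free blocks:
 *
 *        free blocks == sum over all online CPUs of icsb_fdblocks
 *
 * and only while the counter is disabled does the global mp->m_sb field by
 * itself hold the whole value.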
14558d280b98SDavid Chinner * 14568d280b98SDavid Chinner * Difficulties arise in that the incore sb is used for ENOSPC checking, and 14578d280b98SDavid Chinner * hence needs to be accurately read when we are running low on space. Hence 14588d280b98SDavid Chinner * there is a method to enable and disable the per-cpu counters based on how 14598d280b98SDavid Chinner * much "stuff" is available in them. 14608d280b98SDavid Chinner * 14618d280b98SDavid Chinner * Basically, a counter is enabled if there is enough free resource to justify 14628d280b98SDavid Chinner * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local 14638d280b98SDavid Chinner * ENOSPC), then we disable the counters to synchronise all callers and 14648d280b98SDavid Chinner * re-distribute the available resources. 14658d280b98SDavid Chinner * 14668d280b98SDavid Chinner * If, once we redistributed the available resources, we still get a failure, 14678d280b98SDavid Chinner * we disable the per-cpu counter and go through the slow path. 14688d280b98SDavid Chinner * 14698d280b98SDavid Chinner * The slow path is the current xfs_mod_incore_sb() function. This means that 14709da096fdSMalcolm Parsons * when we disable a per-cpu counter, we need to drain its resources back to 14718d280b98SDavid Chinner * the global superblock. We do this after disabling the counter to prevent 14728d280b98SDavid Chinner * more threads from queueing up on the counter. 14738d280b98SDavid Chinner * 14748d280b98SDavid Chinner * Essentially, this means that we still need a lock in the fast path to enable 14758d280b98SDavid Chinner * synchronisation between the global counters and the per-cpu counters. This 14768d280b98SDavid Chinner * is not a problem because the lock will be local to a CPU almost all the time 14778d280b98SDavid Chinner * and have little contention except when we get to ENOSPC conditions. 14788d280b98SDavid Chinner * 14798d280b98SDavid Chinner * Basically, this lock becomes a barrier that enables us to lock out the fast 14808d280b98SDavid Chinner * path while we do things like enabling and disabling counters and 14818d280b98SDavid Chinner * synchronising the counters. 14828d280b98SDavid Chinner * 14838d280b98SDavid Chinner * Locking rules: 14848d280b98SDavid Chinner * 14853685c2a1SEric Sandeen * 1. m_sb_lock before picking up per-cpu locks 14868d280b98SDavid Chinner * 2. per-cpu locks always picked up via for_each_online_cpu() order 14873685c2a1SEric Sandeen * 3. accurate counter sync requires m_sb_lock + per cpu locks 14888d280b98SDavid Chinner * 4. modifying per-cpu counters requires holding per-cpu lock 14893685c2a1SEric Sandeen * 5. modifying global counters requires holding m_sb_lock 14903685c2a1SEric Sandeen * 6. enabling or disabling a counter requires holding the m_sb_lock 14918d280b98SDavid Chinner * and _none_ of the per-cpu locks. 14928d280b98SDavid Chinner * 14938d280b98SDavid Chinner * Disabled counters are only ever re-enabled by a balance operation 14948d280b98SDavid Chinner * that results in more free resources per CPU than a given threshold. 14958d280b98SDavid Chinner * To ensure counters don't remain disabled, they are rebalanced when 14968d280b98SDavid Chinner * the global resource goes above a higher threshold (i.e. some hysteresis 14978d280b98SDavid Chinner * is present to prevent thrashing). 14988d280b98SDavid Chinner */ 1499e8234a68SDavid Chinner 15005a67e4c5SChandra Seetharaman #ifdef CONFIG_HOTPLUG_CPU 1501e8234a68SDavid Chinner /* 1502e8234a68SDavid Chinner * hot-plug CPU notifier support. 
1503e8234a68SDavid Chinner * 15045a67e4c5SChandra Seetharaman * We need a notifier per filesystem as we need to be able to identify 15055a67e4c5SChandra Seetharaman * the filesystem to balance the counters out. This is achieved by 15065a67e4c5SChandra Seetharaman * having a notifier block embedded in the xfs_mount_t and doing pointer 15075a67e4c5SChandra Seetharaman * magic to get the mount pointer from the notifier block address. 1508e8234a68SDavid Chinner */ 1509e8234a68SDavid Chinner STATIC int 1510e8234a68SDavid Chinner xfs_icsb_cpu_notify( 1511e8234a68SDavid Chinner struct notifier_block *nfb, 1512e8234a68SDavid Chinner unsigned long action, 1513e8234a68SDavid Chinner void *hcpu) 1514e8234a68SDavid Chinner { 1515e8234a68SDavid Chinner xfs_icsb_cnts_t *cntp; 1516e8234a68SDavid Chinner xfs_mount_t *mp; 1517e8234a68SDavid Chinner 1518e8234a68SDavid Chinner mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); 1519e8234a68SDavid Chinner cntp = (xfs_icsb_cnts_t *) 1520e8234a68SDavid Chinner per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); 1521e8234a68SDavid Chinner switch (action) { 1522e8234a68SDavid Chinner case CPU_UP_PREPARE: 15238bb78442SRafael J. Wysocki case CPU_UP_PREPARE_FROZEN: 1524e8234a68SDavid Chinner /* Easy Case - initialize the area and locks, and 1525e8234a68SDavid Chinner * then rebalance when online does everything else for us. */ 152601e1b69cSDavid Chinner memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); 1527e8234a68SDavid Chinner break; 1528e8234a68SDavid Chinner case CPU_ONLINE: 15298bb78442SRafael J. Wysocki case CPU_ONLINE_FROZEN: 153003135cf7SDavid Chinner xfs_icsb_lock(mp); 153145af6c6dSChristoph Hellwig xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); 153245af6c6dSChristoph Hellwig xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); 153345af6c6dSChristoph Hellwig xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); 153403135cf7SDavid Chinner xfs_icsb_unlock(mp); 1535e8234a68SDavid Chinner break; 1536e8234a68SDavid Chinner case CPU_DEAD: 15378bb78442SRafael J. Wysocki case CPU_DEAD_FROZEN: 1538e8234a68SDavid Chinner /* Disable all the counters, then fold the dead cpu's 1539e8234a68SDavid Chinner * count into the total on the global superblock and 1540e8234a68SDavid Chinner * re-enable the counters. 
*/ 154103135cf7SDavid Chinner xfs_icsb_lock(mp); 15423685c2a1SEric Sandeen spin_lock(&mp->m_sb_lock); 1543e8234a68SDavid Chinner xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); 1544e8234a68SDavid Chinner xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); 1545e8234a68SDavid Chinner xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); 1546e8234a68SDavid Chinner 1547e8234a68SDavid Chinner mp->m_sb.sb_icount += cntp->icsb_icount; 1548e8234a68SDavid Chinner mp->m_sb.sb_ifree += cntp->icsb_ifree; 1549e8234a68SDavid Chinner mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; 1550e8234a68SDavid Chinner 155101e1b69cSDavid Chinner memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); 1552e8234a68SDavid Chinner 155345af6c6dSChristoph Hellwig xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0); 155445af6c6dSChristoph Hellwig xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0); 155545af6c6dSChristoph Hellwig xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0); 15563685c2a1SEric Sandeen spin_unlock(&mp->m_sb_lock); 155703135cf7SDavid Chinner xfs_icsb_unlock(mp); 1558e8234a68SDavid Chinner break; 1559e8234a68SDavid Chinner } 1560e8234a68SDavid Chinner 1561e8234a68SDavid Chinner return NOTIFY_OK; 1562e8234a68SDavid Chinner } 15635a67e4c5SChandra Seetharaman #endif /* CONFIG_HOTPLUG_CPU */ 1564e8234a68SDavid Chinner 15658d280b98SDavid Chinner int 15668d280b98SDavid Chinner xfs_icsb_init_counters( 15678d280b98SDavid Chinner xfs_mount_t *mp) 15688d280b98SDavid Chinner { 15698d280b98SDavid Chinner xfs_icsb_cnts_t *cntp; 15708d280b98SDavid Chinner int i; 15718d280b98SDavid Chinner 15728d280b98SDavid Chinner mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); 15738d280b98SDavid Chinner if (mp->m_sb_cnts == NULL) 15748d280b98SDavid Chinner return -ENOMEM; 15758d280b98SDavid Chinner 15768d280b98SDavid Chinner for_each_online_cpu(i) { 15778d280b98SDavid Chinner cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); 157801e1b69cSDavid Chinner memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); 15798d280b98SDavid Chinner } 158020b64285SDavid Chinner 158120b64285SDavid Chinner mutex_init(&mp->m_icsb_mutex); 158220b64285SDavid Chinner 15838d280b98SDavid Chinner /* 15848d280b98SDavid Chinner * start with all counters disabled so that the 15858d280b98SDavid Chinner * initial balance kicks us off correctly 15868d280b98SDavid Chinner */ 15878d280b98SDavid Chinner mp->m_icsb_counters = -1; 158846677e67SRichard Weinberger 158946677e67SRichard Weinberger #ifdef CONFIG_HOTPLUG_CPU 159046677e67SRichard Weinberger mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; 159146677e67SRichard Weinberger mp->m_icsb_notifier.priority = 0; 159246677e67SRichard Weinberger register_hotcpu_notifier(&mp->m_icsb_notifier); 159346677e67SRichard Weinberger #endif /* CONFIG_HOTPLUG_CPU */ 159446677e67SRichard Weinberger 15958d280b98SDavid Chinner return 0; 15968d280b98SDavid Chinner } 15978d280b98SDavid Chinner 15985478eeadSLachlan McIlroy void 15995478eeadSLachlan McIlroy xfs_icsb_reinit_counters( 16005478eeadSLachlan McIlroy xfs_mount_t *mp) 16015478eeadSLachlan McIlroy { 16025478eeadSLachlan McIlroy xfs_icsb_lock(mp); 16035478eeadSLachlan McIlroy /* 16045478eeadSLachlan McIlroy * start with all counters disabled so that the 16055478eeadSLachlan McIlroy * initial balance kicks us off correctly 16065478eeadSLachlan McIlroy */ 16075478eeadSLachlan McIlroy mp->m_icsb_counters = -1; 160845af6c6dSChristoph Hellwig xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); 160945af6c6dSChristoph Hellwig xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); 161045af6c6dSChristoph Hellwig 
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); 16115478eeadSLachlan McIlroy xfs_icsb_unlock(mp); 16125478eeadSLachlan McIlroy } 16135478eeadSLachlan McIlroy 1614c962fb79SChristoph Hellwig void 16158d280b98SDavid Chinner xfs_icsb_destroy_counters( 16168d280b98SDavid Chinner xfs_mount_t *mp) 16178d280b98SDavid Chinner { 1618e8234a68SDavid Chinner if (mp->m_sb_cnts) { 16195a67e4c5SChandra Seetharaman unregister_hotcpu_notifier(&mp->m_icsb_notifier); 16208d280b98SDavid Chinner free_percpu(mp->m_sb_cnts); 16218d280b98SDavid Chinner } 162203135cf7SDavid Chinner mutex_destroy(&mp->m_icsb_mutex); 1623e8234a68SDavid Chinner } 16248d280b98SDavid Chinner 1625b8f82a4aSChristoph Hellwig STATIC void 162601e1b69cSDavid Chinner xfs_icsb_lock_cntr( 162701e1b69cSDavid Chinner xfs_icsb_cnts_t *icsbp) 162801e1b69cSDavid Chinner { 162901e1b69cSDavid Chinner while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) { 163001e1b69cSDavid Chinner ndelay(1000); 163101e1b69cSDavid Chinner } 163201e1b69cSDavid Chinner } 163301e1b69cSDavid Chinner 1634b8f82a4aSChristoph Hellwig STATIC void 163501e1b69cSDavid Chinner xfs_icsb_unlock_cntr( 163601e1b69cSDavid Chinner xfs_icsb_cnts_t *icsbp) 163701e1b69cSDavid Chinner { 163801e1b69cSDavid Chinner clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags); 163901e1b69cSDavid Chinner } 164001e1b69cSDavid Chinner 16418d280b98SDavid Chinner 1642b8f82a4aSChristoph Hellwig STATIC void 16438d280b98SDavid Chinner xfs_icsb_lock_all_counters( 16448d280b98SDavid Chinner xfs_mount_t *mp) 16458d280b98SDavid Chinner { 16468d280b98SDavid Chinner xfs_icsb_cnts_t *cntp; 16478d280b98SDavid Chinner int i; 16488d280b98SDavid Chinner 16498d280b98SDavid Chinner for_each_online_cpu(i) { 16508d280b98SDavid Chinner cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); 165101e1b69cSDavid Chinner xfs_icsb_lock_cntr(cntp); 16528d280b98SDavid Chinner } 16538d280b98SDavid Chinner } 16548d280b98SDavid Chinner 1655b8f82a4aSChristoph Hellwig STATIC void 16568d280b98SDavid Chinner xfs_icsb_unlock_all_counters( 16578d280b98SDavid Chinner xfs_mount_t *mp) 16588d280b98SDavid Chinner { 16598d280b98SDavid Chinner xfs_icsb_cnts_t *cntp; 16608d280b98SDavid Chinner int i; 16618d280b98SDavid Chinner 16628d280b98SDavid Chinner for_each_online_cpu(i) { 16638d280b98SDavid Chinner cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); 166401e1b69cSDavid Chinner xfs_icsb_unlock_cntr(cntp); 16658d280b98SDavid Chinner } 16668d280b98SDavid Chinner } 16678d280b98SDavid Chinner 16688d280b98SDavid Chinner STATIC void 16698d280b98SDavid Chinner xfs_icsb_count( 16708d280b98SDavid Chinner xfs_mount_t *mp, 16718d280b98SDavid Chinner xfs_icsb_cnts_t *cnt, 16728d280b98SDavid Chinner int flags) 16738d280b98SDavid Chinner { 16748d280b98SDavid Chinner xfs_icsb_cnts_t *cntp; 16758d280b98SDavid Chinner int i; 16768d280b98SDavid Chinner 16778d280b98SDavid Chinner memset(cnt, 0, sizeof(xfs_icsb_cnts_t)); 16788d280b98SDavid Chinner 16798d280b98SDavid Chinner if (!(flags & XFS_ICSB_LAZY_COUNT)) 16808d280b98SDavid Chinner xfs_icsb_lock_all_counters(mp); 16818d280b98SDavid Chinner 16828d280b98SDavid Chinner for_each_online_cpu(i) { 16838d280b98SDavid Chinner cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); 16848d280b98SDavid Chinner cnt->icsb_icount += cntp->icsb_icount; 16858d280b98SDavid Chinner cnt->icsb_ifree += cntp->icsb_ifree; 16868d280b98SDavid Chinner cnt->icsb_fdblocks += cntp->icsb_fdblocks; 16878d280b98SDavid Chinner } 16888d280b98SDavid Chinner 16898d280b98SDavid Chinner if (!(flags & XFS_ICSB_LAZY_COUNT)) 
16908d280b98SDavid Chinner xfs_icsb_unlock_all_counters(mp); 16918d280b98SDavid Chinner } 16928d280b98SDavid Chinner 16938d280b98SDavid Chinner STATIC int 16948d280b98SDavid Chinner xfs_icsb_counter_disabled( 16958d280b98SDavid Chinner xfs_mount_t *mp, 16968d280b98SDavid Chinner xfs_sb_field_t field) 16978d280b98SDavid Chinner { 16988d280b98SDavid Chinner ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); 16998d280b98SDavid Chinner return test_bit(field, &mp->m_icsb_counters); 17008d280b98SDavid Chinner } 17018d280b98SDavid Chinner 170236fbe6e6SDavid Chinner STATIC void 17038d280b98SDavid Chinner xfs_icsb_disable_counter( 17048d280b98SDavid Chinner xfs_mount_t *mp, 17058d280b98SDavid Chinner xfs_sb_field_t field) 17068d280b98SDavid Chinner { 17078d280b98SDavid Chinner xfs_icsb_cnts_t cnt; 17088d280b98SDavid Chinner 17098d280b98SDavid Chinner ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); 17108d280b98SDavid Chinner 171120b64285SDavid Chinner /* 171220b64285SDavid Chinner * If we are already disabled, then there is nothing to do 171320b64285SDavid Chinner * here. We check before locking all the counters to avoid 171420b64285SDavid Chinner * the expensive lock operation when being called in the 171520b64285SDavid Chinner * slow path and the counter is already disabled. This is 171620b64285SDavid Chinner * safe because the only time we set or clear this state is under 171720b64285SDavid Chinner * the m_icsb_mutex. 171820b64285SDavid Chinner */ 171920b64285SDavid Chinner if (xfs_icsb_counter_disabled(mp, field)) 172036fbe6e6SDavid Chinner return; 172120b64285SDavid Chinner 17228d280b98SDavid Chinner xfs_icsb_lock_all_counters(mp); 17238d280b98SDavid Chinner if (!test_and_set_bit(field, &mp->m_icsb_counters)) { 17248d280b98SDavid Chinner /* drain back to superblock */ 17258d280b98SDavid Chinner 1726ce46193bSChristoph Hellwig xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT); 17278d280b98SDavid Chinner switch(field) { 17288d280b98SDavid Chinner case XFS_SBS_ICOUNT: 17298d280b98SDavid Chinner mp->m_sb.sb_icount = cnt.icsb_icount; 17308d280b98SDavid Chinner break; 17318d280b98SDavid Chinner case XFS_SBS_IFREE: 17328d280b98SDavid Chinner mp->m_sb.sb_ifree = cnt.icsb_ifree; 17338d280b98SDavid Chinner break; 17348d280b98SDavid Chinner case XFS_SBS_FDBLOCKS: 17358d280b98SDavid Chinner mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; 17368d280b98SDavid Chinner break; 17378d280b98SDavid Chinner default: 17388d280b98SDavid Chinner BUG(); 17398d280b98SDavid Chinner } 17408d280b98SDavid Chinner } 17418d280b98SDavid Chinner 17428d280b98SDavid Chinner xfs_icsb_unlock_all_counters(mp); 17438d280b98SDavid Chinner } 17448d280b98SDavid Chinner 17458d280b98SDavid Chinner STATIC void 17468d280b98SDavid Chinner xfs_icsb_enable_counter( 17478d280b98SDavid Chinner xfs_mount_t *mp, 17488d280b98SDavid Chinner xfs_sb_field_t field, 17498d280b98SDavid Chinner uint64_t count, 17508d280b98SDavid Chinner uint64_t resid) 17518d280b98SDavid Chinner { 17528d280b98SDavid Chinner xfs_icsb_cnts_t *cntp; 17538d280b98SDavid Chinner int i; 17548d280b98SDavid Chinner 17558d280b98SDavid Chinner ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); 17568d280b98SDavid Chinner 17578d280b98SDavid Chinner xfs_icsb_lock_all_counters(mp); 17588d280b98SDavid Chinner for_each_online_cpu(i) { 17598d280b98SDavid Chinner cntp = per_cpu_ptr(mp->m_sb_cnts, i); 17608d280b98SDavid Chinner switch (field) { 17618d280b98SDavid Chinner case XFS_SBS_ICOUNT: 17628d280b98SDavid Chinner cntp->icsb_icount = count + resid; 
17638d280b98SDavid Chinner break; 17648d280b98SDavid Chinner case XFS_SBS_IFREE: 17658d280b98SDavid Chinner cntp->icsb_ifree = count + resid; 17668d280b98SDavid Chinner break; 17678d280b98SDavid Chinner case XFS_SBS_FDBLOCKS: 17688d280b98SDavid Chinner cntp->icsb_fdblocks = count + resid; 17698d280b98SDavid Chinner break; 17708d280b98SDavid Chinner default: 17718d280b98SDavid Chinner BUG(); 17728d280b98SDavid Chinner break; 17738d280b98SDavid Chinner } 17748d280b98SDavid Chinner resid = 0; 17758d280b98SDavid Chinner } 17768d280b98SDavid Chinner clear_bit(field, &mp->m_icsb_counters); 17778d280b98SDavid Chinner xfs_icsb_unlock_all_counters(mp); 17788d280b98SDavid Chinner } 17798d280b98SDavid Chinner 1780dbcabad1SDavid Chinner void 1781d4d90b57SChristoph Hellwig xfs_icsb_sync_counters_locked( 17828d280b98SDavid Chinner xfs_mount_t *mp, 17838d280b98SDavid Chinner int flags) 17848d280b98SDavid Chinner { 17858d280b98SDavid Chinner xfs_icsb_cnts_t cnt; 17868d280b98SDavid Chinner 17878d280b98SDavid Chinner xfs_icsb_count(mp, &cnt, flags); 17888d280b98SDavid Chinner 17898d280b98SDavid Chinner if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) 17908d280b98SDavid Chinner mp->m_sb.sb_icount = cnt.icsb_icount; 17918d280b98SDavid Chinner if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) 17928d280b98SDavid Chinner mp->m_sb.sb_ifree = cnt.icsb_ifree; 17938d280b98SDavid Chinner if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) 17948d280b98SDavid Chinner mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; 17958d280b98SDavid Chinner } 17968d280b98SDavid Chinner 17978d280b98SDavid Chinner /* 17988d280b98SDavid Chinner * Accurate update of per-cpu counters to incore superblock 17998d280b98SDavid Chinner */ 1800d4d90b57SChristoph Hellwig void 18018d280b98SDavid Chinner xfs_icsb_sync_counters( 1802d4d90b57SChristoph Hellwig xfs_mount_t *mp, 1803d4d90b57SChristoph Hellwig int flags) 18048d280b98SDavid Chinner { 1805d4d90b57SChristoph Hellwig spin_lock(&mp->m_sb_lock); 1806d4d90b57SChristoph Hellwig xfs_icsb_sync_counters_locked(mp, flags); 1807d4d90b57SChristoph Hellwig spin_unlock(&mp->m_sb_lock); 18088d280b98SDavid Chinner } 18098d280b98SDavid Chinner 18108d280b98SDavid Chinner /* 18118d280b98SDavid Chinner * Balance and enable/disable counters as necessary. 18128d280b98SDavid Chinner * 181320b64285SDavid Chinner * Thresholds for re-enabling counters are somewhat magic. The inode count is 181420b64285SDavid Chinner * chosen to match the number of inodes in a single on-disk allocation chunk 181520b64285SDavid Chinner * per CPU, and the free block count is something far enough from zero that we 181620b64285SDavid Chinner * aren't going to thrash when we get near ENOSPC. We also need to supply a 181720b64285SDavid Chinner * minimum we require per cpu to prevent looping endlessly when 181820b64285SDavid Chinner * xfs_alloc_space asks for more than will be distributed to a single CPU but 181920b64285SDavid Chinner * each CPU has enough blocks to be re-enabled. 182020b64285SDavid Chinner * 182120b64285SDavid Chinner * Note that we can be called when counters are already disabled. 182220b64285SDavid Chinner * xfs_icsb_disable_counter() optimises the counter locking in this case to 182320b64285SDavid Chinner * prevent locking every per-cpu counter needlessly.
18248d280b98SDavid Chinner */ 182520b64285SDavid Chinner 182620b64285SDavid Chinner #define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64 18274be536deSDavid Chinner #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \ 182820b64285SDavid Chinner (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp)) 18298d280b98SDavid Chinner STATIC void 183045af6c6dSChristoph Hellwig xfs_icsb_balance_counter_locked( 18318d280b98SDavid Chinner xfs_mount_t *mp, 18328d280b98SDavid Chinner xfs_sb_field_t field, 183320b64285SDavid Chinner int min_per_cpu) 18348d280b98SDavid Chinner { 18356fdf8cccSNathan Scott uint64_t count, resid; 18368d280b98SDavid Chinner int weight = num_online_cpus(); 183720b64285SDavid Chinner uint64_t min = (uint64_t)min_per_cpu; 18388d280b98SDavid Chinner 18398d280b98SDavid Chinner /* disable counter and sync counter */ 18408d280b98SDavid Chinner xfs_icsb_disable_counter(mp, field); 18418d280b98SDavid Chinner 18428d280b98SDavid Chinner /* update counters - first CPU gets residual*/ 18438d280b98SDavid Chinner switch (field) { 18448d280b98SDavid Chinner case XFS_SBS_ICOUNT: 18458d280b98SDavid Chinner count = mp->m_sb.sb_icount; 18468d280b98SDavid Chinner resid = do_div(count, weight); 184720b64285SDavid Chinner if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) 184845af6c6dSChristoph Hellwig return; 18498d280b98SDavid Chinner break; 18508d280b98SDavid Chinner case XFS_SBS_IFREE: 18518d280b98SDavid Chinner count = mp->m_sb.sb_ifree; 18528d280b98SDavid Chinner resid = do_div(count, weight); 185320b64285SDavid Chinner if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) 185445af6c6dSChristoph Hellwig return; 18558d280b98SDavid Chinner break; 18568d280b98SDavid Chinner case XFS_SBS_FDBLOCKS: 18578d280b98SDavid Chinner count = mp->m_sb.sb_fdblocks; 18588d280b98SDavid Chinner resid = do_div(count, weight); 185920b64285SDavid Chinner if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp))) 186045af6c6dSChristoph Hellwig return; 18618d280b98SDavid Chinner break; 18628d280b98SDavid Chinner default: 18638d280b98SDavid Chinner BUG(); 18646fdf8cccSNathan Scott count = resid = 0; /* quiet, gcc */ 18658d280b98SDavid Chinner break; 18668d280b98SDavid Chinner } 18678d280b98SDavid Chinner 18688d280b98SDavid Chinner xfs_icsb_enable_counter(mp, field, count, resid); 186945af6c6dSChristoph Hellwig } 187045af6c6dSChristoph Hellwig 187145af6c6dSChristoph Hellwig STATIC void 187245af6c6dSChristoph Hellwig xfs_icsb_balance_counter( 187345af6c6dSChristoph Hellwig xfs_mount_t *mp, 187445af6c6dSChristoph Hellwig xfs_sb_field_t fields, 187545af6c6dSChristoph Hellwig int min_per_cpu) 187645af6c6dSChristoph Hellwig { 187745af6c6dSChristoph Hellwig spin_lock(&mp->m_sb_lock); 187845af6c6dSChristoph Hellwig xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu); 18793685c2a1SEric Sandeen spin_unlock(&mp->m_sb_lock); 18808d280b98SDavid Chinner } 18818d280b98SDavid Chinner 18821b040712SChristoph Hellwig int 188320b64285SDavid Chinner xfs_icsb_modify_counters( 18848d280b98SDavid Chinner xfs_mount_t *mp, 18858d280b98SDavid Chinner xfs_sb_field_t field, 188620f4ebf2SDavid Chinner int64_t delta, 188720b64285SDavid Chinner int rsvd) 18888d280b98SDavid Chinner { 18898d280b98SDavid Chinner xfs_icsb_cnts_t *icsbp; 18908d280b98SDavid Chinner long long lcounter; /* long counter for 64 bit fields */ 18917a9e02d6SChristoph Lameter int ret = 0; 18928d280b98SDavid Chinner 189320b64285SDavid Chinner might_sleep(); 18948d280b98SDavid Chinner again: 18957a9e02d6SChristoph Lameter preempt_disable(); 18967a9e02d6SChristoph Lameter icsbp = this_cpu_ptr(mp->m_sb_cnts); 
189720b64285SDavid Chinner 189820b64285SDavid Chinner /* 189920b64285SDavid Chinner * if the counter is disabled, go to slow path 190020b64285SDavid Chinner */ 19018d280b98SDavid Chinner if (unlikely(xfs_icsb_counter_disabled(mp, field))) 19028d280b98SDavid Chinner goto slow_path; 190320b64285SDavid Chinner xfs_icsb_lock_cntr(icsbp); 190420b64285SDavid Chinner if (unlikely(xfs_icsb_counter_disabled(mp, field))) { 190520b64285SDavid Chinner xfs_icsb_unlock_cntr(icsbp); 190620b64285SDavid Chinner goto slow_path; 190720b64285SDavid Chinner } 19088d280b98SDavid Chinner 19098d280b98SDavid Chinner switch (field) { 19108d280b98SDavid Chinner case XFS_SBS_ICOUNT: 19118d280b98SDavid Chinner lcounter = icsbp->icsb_icount; 19128d280b98SDavid Chinner lcounter += delta; 19138d280b98SDavid Chinner if (unlikely(lcounter < 0)) 191420b64285SDavid Chinner goto balance_counter; 19158d280b98SDavid Chinner icsbp->icsb_icount = lcounter; 19168d280b98SDavid Chinner break; 19178d280b98SDavid Chinner 19188d280b98SDavid Chinner case XFS_SBS_IFREE: 19198d280b98SDavid Chinner lcounter = icsbp->icsb_ifree; 19208d280b98SDavid Chinner lcounter += delta; 19218d280b98SDavid Chinner if (unlikely(lcounter < 0)) 192220b64285SDavid Chinner goto balance_counter; 19238d280b98SDavid Chinner icsbp->icsb_ifree = lcounter; 19248d280b98SDavid Chinner break; 19258d280b98SDavid Chinner 19268d280b98SDavid Chinner case XFS_SBS_FDBLOCKS: 19278d280b98SDavid Chinner BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0); 19288d280b98SDavid Chinner 19294be536deSDavid Chinner lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 19308d280b98SDavid Chinner lcounter += delta; 19318d280b98SDavid Chinner if (unlikely(lcounter < 0)) 193220b64285SDavid Chinner goto balance_counter; 19334be536deSDavid Chinner icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); 19348d280b98SDavid Chinner break; 19358d280b98SDavid Chinner default: 19368d280b98SDavid Chinner BUG(); 19378d280b98SDavid Chinner break; 19388d280b98SDavid Chinner } 193901e1b69cSDavid Chinner xfs_icsb_unlock_cntr(icsbp); 19407a9e02d6SChristoph Lameter preempt_enable(); 19418d280b98SDavid Chinner return 0; 19428d280b98SDavid Chinner 19438d280b98SDavid Chinner slow_path: 19447a9e02d6SChristoph Lameter preempt_enable(); 194520b64285SDavid Chinner 194620b64285SDavid Chinner /* 194720b64285SDavid Chinner * serialise with a mutex so we don't burn lots of cpu on 194820b64285SDavid Chinner * the superblock lock. We still need to hold the superblock 194920b64285SDavid Chinner * lock, however, when we modify the global structures. 195020b64285SDavid Chinner */ 195103135cf7SDavid Chinner xfs_icsb_lock(mp); 195220b64285SDavid Chinner 195320b64285SDavid Chinner /* 195420b64285SDavid Chinner * Now running atomically. 195520b64285SDavid Chinner * 195620b64285SDavid Chinner * If the counter is enabled, someone has beaten us to rebalancing. 195720b64285SDavid Chinner * Drop the lock and try again in the fast path.... 195820b64285SDavid Chinner */ 195920b64285SDavid Chinner if (!(xfs_icsb_counter_disabled(mp, field))) { 196003135cf7SDavid Chinner xfs_icsb_unlock(mp); 196120b64285SDavid Chinner goto again; 196220b64285SDavid Chinner } 196320b64285SDavid Chinner 196420b64285SDavid Chinner /* 196520b64285SDavid Chinner * The counter is currently disabled. Because we are 196620b64285SDavid Chinner * running atomically here, we know a rebalance cannot 196720b64285SDavid Chinner * be in progress. Hence we can go straight to operating 196820b64285SDavid Chinner * on the global superblock. 
We do not call xfs_mod_incore_sb() 19693685c2a1SEric Sandeen * here even though we need to get the m_sb_lock. Doing so 197020b64285SDavid Chinner * will cause us to re-enter this function and deadlock. 19713685c2a1SEric Sandeen * Hence we get the m_sb_lock ourselves and then call 197220b64285SDavid Chinner * xfs_mod_incore_sb_unlocked() as the unlocked path operates 197320b64285SDavid Chinner * directly on the global counters. 197420b64285SDavid Chinner */ 19753685c2a1SEric Sandeen spin_lock(&mp->m_sb_lock); 197620b64285SDavid Chinner ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 19773685c2a1SEric Sandeen spin_unlock(&mp->m_sb_lock); 197820b64285SDavid Chinner 197920b64285SDavid Chinner /* 198020b64285SDavid Chinner * Now that we've modified the global superblock, we 198120b64285SDavid Chinner * may be able to re-enable the distributed counters 198220b64285SDavid Chinner * (e.g. lots of space just got freed). After that 198320b64285SDavid Chinner * we are done. 198420b64285SDavid Chinner */ 198520b64285SDavid Chinner if (ret != ENOSPC) 198645af6c6dSChristoph Hellwig xfs_icsb_balance_counter(mp, field, 0); 198703135cf7SDavid Chinner xfs_icsb_unlock(mp); 198820b64285SDavid Chinner return ret; 198920b64285SDavid Chinner 199020b64285SDavid Chinner balance_counter: 199101e1b69cSDavid Chinner xfs_icsb_unlock_cntr(icsbp); 19927a9e02d6SChristoph Lameter preempt_enable(); 19938d280b98SDavid Chinner 199420b64285SDavid Chinner /* 199520b64285SDavid Chinner * We may have multiple threads here if multiple per-cpu 199620b64285SDavid Chinner * counters run dry at the same time. This will mean we can 199720b64285SDavid Chinner * do more balances than strictly necessary but it is not 199820b64285SDavid Chinner * the common slowpath case. 199920b64285SDavid Chinner */ 200003135cf7SDavid Chinner xfs_icsb_lock(mp); 200120b64285SDavid Chinner 200220b64285SDavid Chinner /* 200320b64285SDavid Chinner * running atomically. 200420b64285SDavid Chinner * 200520b64285SDavid Chinner * This will leave the counter in the correct state for future 200620b64285SDavid Chinner * accesses. After the rebalance, we simply try again and our retry 200720b64285SDavid Chinner * will either succeed through the fast path or slow path without 200820b64285SDavid Chinner * another balance operation being required. 200920b64285SDavid Chinner */ 201045af6c6dSChristoph Hellwig xfs_icsb_balance_counter(mp, field, delta); 201103135cf7SDavid Chinner xfs_icsb_unlock(mp); 20128d280b98SDavid Chinner goto again; 20138d280b98SDavid Chinner } 20148d280b98SDavid Chinner 20158d280b98SDavid Chinner #endif 2016
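
#ifdef HAVE_PERCPU_SB
/*
 * A minimal sketch of one way a caller might consume free blocks through the
 * per-cpu machinery above.  The function name and the __maybe_unused
 * annotation are hypothetical, invented here for illustration; only
 * xfs_icsb_modify_counters() and XFS_SBS_FDBLOCKS are interfaces defined in
 * this file.
 */
static int __maybe_unused
xfs_example_take_fdblocks(
	xfs_mount_t	*mp,
	int64_t		nblocks,
	int		rsvd)	/* allowed to dip into the reserve pool? */
{
	/*
	 * A negative delta takes blocks.  ENOSPC is returned only after the
	 * slow path has rebalanced the per-cpu counters and, when rsvd is
	 * set, the reserve pool has been exhausted as well.
	 */
	return xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -nblocks, rsvd);
}
#endif /* HAVE_PERCPU_SB */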