// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
struct xfs_perag;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,
};

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,
	XFS_ERR_CLASS_MAX,
};
enum {
	XFS_ERR_DEFAULT,
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,
};

#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855
 * ((2^31 - 1) / 86400 ~= 24855).  Making it signed lets us store the
 * special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj		kobj;
	int			max_retries;
	long			retry_timeout;	/* in jiffies, -1 = infinite */
};

/*
 * Per-cpu deferred inode inactivation GC lists.
 */
struct xfs_inodegc {
	struct xfs_mount	*mp;
	struct llist_head	list;
	struct delayed_work	work;
	int			error;

	/* approximate count of inodes in the list */
	unsigned int		items;
	unsigned int		shrinker_hits;
	unsigned int		cpu;
};

/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified.  We put the read-mostly
 * variables first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	struct xfs_buftarg	*m_ddev_targp;	/* data device */
	struct xfs_buftarg	*m_logdev_targp;/* log device */
	struct xfs_buftarg	*m_rtdev_targp;	/* rt device */
	void __percpu		*m_inodegc;	/* percpu inodegc structures */

	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] > the maximum i for which
	 * rsum[i][bbno] != 0, or 0 if rsum[i][bbno] == 0 for all i.
	 * Reads and writes are serialized by the rsumip inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_sync_workqueue;
	struct workqueue_struct	*m_blockgc_wq;
	struct workqueue_struct	*m_inodegc_wq;

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	int8_t			m_rtxblklog;	/* log2 of rextsize, if possible */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_alloc_maxlevels; /* max alloc btree levels */
	uint			m_bm_maxlevels[2]; /* max bmap btree levels */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	unsigned int		m_agbtree_maxlevels; /* max level of all AG btrees */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log; /* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_features;	/* active filesystem features */
	uint64_t		m_low_space[XFS_LOWSP_MAX];
	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
						/* low free space thresholds */
	uint64_t		m_rtxblkmask;	/* rt extent block mask */
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
	unsigned long		m_opstate;	/* dynamic state flags */
	bool			m_always_cow;
	bool			m_fail_unmount;
	bool			m_finobt_nores;	/* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables.  Frequently written variables and
	 * locks should be placed below this comment from now on.  The first
	 * variable here is marked as cacheline aligned so that it is
	 * separated from the read-mostly variables.
	 */

	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	struct percpu_counter	m_frextents;	/* free rt extent counter */

	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;

	/*
	 * RT version of the above.
	 */
	struct percpu_counter	m_delalloc_rtextents;

	/*
	 * Global count of allocation btree blocks in use across all AGs. Only
	 * used when perag reservation is enabled.  Helps prevent block
	 * reservation from attempting to reserve allocation btree blocks.
	 */
	atomic64_t		m_allocbt_blks;

	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct dentry		*m_debugfs;	/* debugfs parent */
	struct xfs_kobj		m_kobj;
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
#ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
	struct xchk_stats	*m_scrub_stats;
#endif
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	atomic_t		m_agirotor;	/* last ag dir inode alloced */

	/* Memory shrinker to throttle and reprioritize inodegc */
	struct shrinker		*m_inodegc_shrinker;
	/*
	 * Workqueue item so that we can coalesce multiple inode flush attempts
	 * into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif

	/* cpus that have inodes queued for inactivation */
	struct cpumask		m_inodegc_cpumask;

	/* Hook to feed dirent updates to an active online repair. */
	struct xfs_hooks	m_dir_update_hooks;
} xfs_mount_t;

#define M_IGEO(mp)		(&(mp)->m_ino_geo)
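/*
 * Illustrative example: the per-mount error configuration above is a
 * two-dimensional array indexed by error class and error number, so the
 * retry policy for metadata EIO errors lives at
 *
 *	mp->m_error_cfg[XFS_ERR_METADATA][XFS_ERR_EIO]
 *
 * and is normally reached through xfs_error_get_cfg(), declared at the end
 * of this header.
 */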
/*
 * Flags for m_features.
 *
 * These are all the active features in the filesystem, regardless of how
 * they are configured.
 */
#define XFS_FEAT_ATTR		(1ULL << 0)	/* xattrs present in fs */
#define XFS_FEAT_NLINK		(1ULL << 1)	/* 32 bit link counts */
#define XFS_FEAT_QUOTA		(1ULL << 2)	/* quota active */
#define XFS_FEAT_ALIGN		(1ULL << 3)	/* inode alignment */
#define XFS_FEAT_DALIGN		(1ULL << 4)	/* data alignment */
#define XFS_FEAT_LOGV2		(1ULL << 5)	/* version 2 logs */
#define XFS_FEAT_SECTOR		(1ULL << 6)	/* sector size > 512 bytes */
#define XFS_FEAT_EXTFLG		(1ULL << 7)	/* unwritten extents */
#define XFS_FEAT_ASCIICI	(1ULL << 8)	/* ASCII only case-insens. */
#define XFS_FEAT_LAZYSBCOUNT	(1ULL << 9)	/* Superblk counters */
#define XFS_FEAT_ATTR2		(1ULL << 10)	/* dynamic attr fork */
#define XFS_FEAT_PARENT		(1ULL << 11)	/* parent pointers */
#define XFS_FEAT_PROJID32	(1ULL << 12)	/* 32 bit project id */
#define XFS_FEAT_CRC		(1ULL << 13)	/* metadata CRCs */
#define XFS_FEAT_V3INODES	(1ULL << 14)	/* Version 3 inodes */
#define XFS_FEAT_PQUOTINO	(1ULL << 15)	/* non-shared proj/grp quotas */
#define XFS_FEAT_FTYPE		(1ULL << 16)	/* inode type in dir */
#define XFS_FEAT_FINOBT		(1ULL << 17)	/* free inode btree */
#define XFS_FEAT_RMAPBT		(1ULL << 18)	/* reverse map btree */
#define XFS_FEAT_REFLINK	(1ULL << 19)	/* reflinked files */
#define XFS_FEAT_SPINODES	(1ULL << 20)	/* sparse inode chunks */
#define XFS_FEAT_META_UUID	(1ULL << 21)	/* metadata UUID */
#define XFS_FEAT_REALTIME	(1ULL << 22)	/* realtime device present */
#define XFS_FEAT_INOBTCNT	(1ULL << 23)	/* inobt block counts */
#define XFS_FEAT_BIGTIME	(1ULL << 24)	/* large timestamps */
#define XFS_FEAT_NEEDSREPAIR	(1ULL << 25)	/* needs xfs_repair */
#define XFS_FEAT_NREXT64	(1ULL << 26)	/* large extent counters */
#define XFS_FEAT_EXCHANGE_RANGE	(1ULL << 27)	/* exchange range */

/* Mount features */
#define XFS_FEAT_NOATTR2	(1ULL << 48)	/* disable attr2 creation */
#define XFS_FEAT_NOALIGN	(1ULL << 49)	/* ignore alignment */
#define XFS_FEAT_ALLOCSIZE	(1ULL << 50)	/* user specified allocation size */
#define XFS_FEAT_LARGE_IOSIZE	(1ULL << 51)	/* report large preferred
						 * I/O size in stat() */
#define XFS_FEAT_WSYNC		(1ULL << 52)	/* synchronous metadata ops */
#define XFS_FEAT_DIRSYNC	(1ULL << 53)	/* synchronous directory ops */
#define XFS_FEAT_DISCARD	(1ULL << 54)	/* discard unused blocks */
#define XFS_FEAT_GRPID		(1ULL << 55)	/* group-ID assigned from directory */
#define XFS_FEAT_SMALL_INUMS	(1ULL << 56)	/* user wants 32bit inodes */
#define XFS_FEAT_IKEEP		(1ULL << 57)	/* keep empty inode clusters */
#define XFS_FEAT_SWALLOC	(1ULL << 58)	/* stripe width allocation */
#define XFS_FEAT_FILESTREAMS	(1ULL << 59)	/* use filestreams allocator */
#define XFS_FEAT_DAX_ALWAYS	(1ULL << 60)	/* DAX always enabled */
#define XFS_FEAT_DAX_NEVER	(1ULL << 61)	/* DAX never enabled */
#define XFS_FEAT_NORECOVERY	(1ULL << 62)	/* no recovery - dirty fs */
#define XFS_FEAT_NOUUID		(1ULL << 63)	/* ignore uuid during mount */

#define __XFS_HAS_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
	return mp->m_features & XFS_FEAT_ ## NAME; \
}
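/*
 * For example (shown purely for illustration), __XFS_HAS_FEAT(reflink,
 * REFLINK) expands to the predicate:
 *
 *	static inline bool xfs_has_reflink(struct xfs_mount *mp)
 *	{
 *		return mp->m_features & XFS_FEAT_REFLINK;
 *	}
 */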
/* Some features can be added dynamically so they need a set wrapper, too. */
#define __XFS_ADD_FEAT(name, NAME) \
	__XFS_HAS_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
	mp->m_features |= XFS_FEAT_ ## NAME; \
	xfs_sb_version_add ## name(&mp->m_sb); \
}

/* Superblock features */
__XFS_ADD_FEAT(attr, ATTR)
__XFS_HAS_FEAT(nlink, NLINK)
__XFS_ADD_FEAT(quota, QUOTA)
__XFS_HAS_FEAT(dalign, DALIGN)
__XFS_HAS_FEAT(sector, SECTOR)
__XFS_HAS_FEAT(asciici, ASCIICI)
__XFS_HAS_FEAT(parent, PARENT)
__XFS_HAS_FEAT(ftype, FTYPE)
__XFS_HAS_FEAT(finobt, FINOBT)
__XFS_HAS_FEAT(rmapbt, RMAPBT)
__XFS_HAS_FEAT(reflink, REFLINK)
__XFS_HAS_FEAT(sparseinodes, SPINODES)
__XFS_HAS_FEAT(metauuid, META_UUID)
__XFS_HAS_FEAT(realtime, REALTIME)
__XFS_HAS_FEAT(inobtcounts, INOBTCNT)
__XFS_HAS_FEAT(bigtime, BIGTIME)
__XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
__XFS_HAS_FEAT(large_extent_counts, NREXT64)
__XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)

/*
 * Some features are always on for v5 file systems, so allow the compiler to
 * eliminate dead code when building without v4 support.
 */
#define __XFS_HAS_V4_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
	return !IS_ENABLED(CONFIG_XFS_SUPPORT_V4) || \
		(mp->m_features & XFS_FEAT_ ## NAME); \
}

#define __XFS_ADD_V4_FEAT(name, NAME) \
	__XFS_HAS_V4_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
	if (IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) { \
		mp->m_features |= XFS_FEAT_ ## NAME; \
		xfs_sb_version_add ## name(&mp->m_sb); \
	} \
}

__XFS_HAS_V4_FEAT(align, ALIGN)
__XFS_HAS_V4_FEAT(logv2, LOGV2)
__XFS_HAS_V4_FEAT(extflg, EXTFLG)
__XFS_HAS_V4_FEAT(lazysbcount, LAZYSBCOUNT)
__XFS_ADD_V4_FEAT(attr2, ATTR2)
__XFS_ADD_V4_FEAT(projid32, PROJID32)
__XFS_HAS_V4_FEAT(v3inodes, V3INODES)
__XFS_HAS_V4_FEAT(crc, CRC)
__XFS_HAS_V4_FEAT(pquotino, PQUOTINO)

/*
 * Mount features
 *
 * These do not change dynamically - features that can come and go, such as 32
 * bit inodes and read-only state, are kept as operational state rather than
 * features.
 */
__XFS_HAS_FEAT(noattr2, NOATTR2)
__XFS_HAS_FEAT(noalign, NOALIGN)
__XFS_HAS_FEAT(allocsize, ALLOCSIZE)
__XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE)
__XFS_HAS_FEAT(wsync, WSYNC)
__XFS_HAS_FEAT(dirsync, DIRSYNC)
__XFS_HAS_FEAT(discard, DISCARD)
__XFS_HAS_FEAT(grpid, GRPID)
__XFS_HAS_FEAT(small_inums, SMALL_INUMS)
__XFS_HAS_FEAT(ikeep, IKEEP)
__XFS_HAS_FEAT(swalloc, SWALLOC)
__XFS_HAS_FEAT(filestreams, FILESTREAMS)
__XFS_HAS_FEAT(dax_always, DAX_ALWAYS)
__XFS_HAS_FEAT(dax_never, DAX_NEVER)
__XFS_HAS_FEAT(norecovery, NORECOVERY)
__XFS_HAS_FEAT(nouuid, NOUUID)
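/*
 * Usage sketch (illustrative only; the local variable is hypothetical):
 * mount features are queried like any other feature predicate, e.g.
 *
 *	if (xfs_has_allocsize(mp))
 *		prealloc_blocks = mp->m_allocsize_blocks;
 *
 * to honour a user-specified allocation size.
 */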
/*
 * Operational mount state flags
 *
 * Use these with atomic bit ops only!
 */
#define XFS_OPSTATE_UNMOUNTING		0	/* filesystem is unmounting */
#define XFS_OPSTATE_CLEAN		1	/* mount was clean */
#define XFS_OPSTATE_SHUTDOWN		2	/* stop all fs operations */
#define XFS_OPSTATE_INODE32		3	/* inode32 allocator active */
#define XFS_OPSTATE_READONLY		4	/* read-only fs */

/*
 * If set, inactivation worker threads will be scheduled to process queued
 * inodegc work.  If not, queued inodes remain in memory waiting to be
 * processed.
 */
#define XFS_OPSTATE_INODEGC_ENABLED	5
/*
 * If set, background speculative prealloc gc worker threads will be scheduled
 * to process queued blockgc work.  If not, inodes retain their preallocations
 * until explicitly deleted.
 */
#define XFS_OPSTATE_BLOCKGC_ENABLED	6

/* Kernel has logged a warning about online fsck being used on this fs. */
#define XFS_OPSTATE_WARNED_SCRUB	7
/* Kernel has logged a warning about shrink being used on this fs. */
#define XFS_OPSTATE_WARNED_SHRINK	8
/* Kernel has logged a warning about logged xattr updates being used. */
#define XFS_OPSTATE_WARNED_LARP		9
/* Mount time quotacheck is running */
#define XFS_OPSTATE_QUOTACHECK_RUNNING	10
/* Do we want to clear log incompat flags? */
#define XFS_OPSTATE_UNSET_LOG_INCOMPAT	11
/* Filesystem can use logged extended attributes */
#define XFS_OPSTATE_USE_LARP		12

#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
}

__XFS_IS_OPSTATE(unmounting, UNMOUNTING)
__XFS_IS_OPSTATE(clean, CLEAN)
__XFS_IS_OPSTATE(shutdown, SHUTDOWN)
__XFS_IS_OPSTATE(inode32, INODE32)
__XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
#ifdef CONFIG_XFS_QUOTA
__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
#else
# define xfs_is_quotacheck_running(mp)	(false)
#endif
__XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
__XFS_IS_OPSTATE(using_logged_xattrs, USE_LARP)

static inline bool
xfs_should_warn(struct xfs_mount *mp, long nr)
{
	return !test_and_set_bit(nr, &mp->m_opstate);
}

#define XFS_OPSTATE_STRINGS \
	{ (1UL << XFS_OPSTATE_UNMOUNTING),		"unmounting" }, \
	{ (1UL << XFS_OPSTATE_CLEAN),			"clean" }, \
	{ (1UL << XFS_OPSTATE_SHUTDOWN),		"shutdown" }, \
	{ (1UL << XFS_OPSTATE_INODE32),			"inode32" }, \
	{ (1UL << XFS_OPSTATE_READONLY),		"read_only" }, \
	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }, \
	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }, \
	{ (1UL << XFS_OPSTATE_WARNED_SCRUB),		"wscrub" }, \
	{ (1UL << XFS_OPSTATE_WARNED_SHRINK),		"wshrink" }, \
	{ (1UL << XFS_OPSTATE_WARNED_LARP),		"wlarp" }, \
	{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING),	"quotacheck" }, \
	{ (1UL << XFS_OPSTATE_UNSET_LOG_INCOMPAT),	"unset_log_incompat" }, \
	{ (1UL << XFS_OPSTATE_USE_LARP),		"logged_xattrs" }
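/*
 * Usage sketch (illustrative): operational state is queried and updated
 * through the helpers generated above, which keeps all accesses to
 * m_opstate on atomic bit operations, e.g.
 *
 *	if (xfs_is_shutdown(mp))
 *		return -EIO;
 *	...
 *	xfs_set_inodegc_enabled(mp);	// test_and_set_bit() semantics
 */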
/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

#define SHUTDOWN_META_IO_ERROR	(1u << 0) /* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	(1u << 1) /* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	(1u << 2) /* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	(1u << 3) /* corrupt in-memory structures */
#define SHUTDOWN_CORRUPT_ONDISK	(1u << 4) /* corrupt metadata on device */
#define SHUTDOWN_DEVICE_REMOVED	(1u << 5) /* device removed underneath us */

#define XFS_SHUTDOWN_STRINGS \
	{ SHUTDOWN_META_IO_ERROR,	"metadata_io" }, \
	{ SHUTDOWN_LOG_IO_ERROR,	"log_io" }, \
	{ SHUTDOWN_FORCE_UMOUNT,	"force_umount" }, \
	{ SHUTDOWN_CORRUPT_INCORE,	"corruption" }, \
	{ SHUTDOWN_DEVICE_REMOVED,	"device_removed" }

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}

extern void	xfs_uuid_table_free(void);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern void	xfs_unmountfs(xfs_mount_t *);

/*
 * Deltas for the block count can vary from 1 to very large, but lock
 * contention only occurs on frequent small block count updates such as in the
 * delayed allocation path for buffered writes (page-at-a-time updates).  Hence
 * we set a large batch count (1024) to minimise global counter updates except
 * when we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024
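/*
 * Sketch of how the batch size might be applied (illustrative; the real
 * callers are the freecounter helpers declared below):
 *
 *	percpu_counter_add_batch(&mp->m_fdblocks, delta, XFS_FDBLOCKS_BATCH);
 *
 * Small deltas stay on the local per-cpu counter and are only folded into
 * the global count once a CPU's accumulated delta exceeds the batch.
 */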
/*
 * Estimate the amount of free space that is not available to userspace and is
 * not explicitly reserved from the incore fdblocks.  This includes:
 *
 * - The minimum number of blocks needed to support splitting a bmap btree
 * - The blocks currently in use by the freespace btrees because they record
 *   the actual blocks that will fill per-AG metadata space reservations
 */
static inline uint64_t
xfs_fdblocks_unavailable(
	struct xfs_mount	*mp)
{
	return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
}

int xfs_dec_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
		uint64_t delta, bool rsvd);
void xfs_add_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
		uint64_t delta);

static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
		bool reserved)
{
	return xfs_dec_freecounter(mp, &mp->m_fdblocks, delta, reserved);
}

static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
{
	xfs_add_freecounter(mp, &mp->m_fdblocks, delta);
}

static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
{
	return xfs_dec_freecounter(mp, &mp->m_frextents, delta, false);
}

static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
{
	xfs_add_freecounter(mp, &mp->m_frextents, delta);
}

extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
		xfs_off_t count_fsb);

struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
		int error_class, int error);
void xfs_force_summary_recalc(struct xfs_mount *mp);
int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature);
bool xfs_clear_incompat_log_features(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_inode *ip, int64_t data_delta,
		int64_t ind_delta);

#endif	/* __XFS_MOUNT_H__ */