xref: /linux/fs/xfs/xfs_mount.h (revision c148bc7535650fbfa95a1f571b9ffa2ab478ea33)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4   * All Rights Reserved.
5   */
6  #ifndef __XFS_MOUNT_H__
7  #define	__XFS_MOUNT_H__
8  
9  struct xlog;
10  struct xfs_inode;
11  struct xfs_mru_cache;
12  struct xfs_ail;
13  struct xfs_quotainfo;
14  struct xfs_da_geometry;
15  struct xfs_perag;
16  
17  /* dynamic preallocation free space thresholds, 5% down to 1% */
18  enum {
19  	XFS_LOWSP_1_PCNT = 0,
20  	XFS_LOWSP_2_PCNT,
21  	XFS_LOWSP_3_PCNT,
22  	XFS_LOWSP_4_PCNT,
23  	XFS_LOWSP_5_PCNT,
24  	XFS_LOWSP_MAX,
25  };
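
/*
 * These values index mp->m_low_space[] and mp->m_low_rtexts[], which
 * xfs_set_low_space_thresholds() (declared further down) fills at mount
 * time.  Conceptually, slot i holds (i + 1) percent of the device size,
 * e.g. for the data device (illustrative sketch, not the exact code):
 *
 *	for (i = 0; i < XFS_LOWSP_MAX; i++)
 *		mp->m_low_space[i] = div_u64(mp->m_sb.sb_dblocks * (i + 1), 100);
 */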
26  
27  /*
28   * Error Configuration
29   *
30   * Error classes define the subsystem the configuration belongs to.
31   * Error numbers define the errors that are configurable.
32   */
33  enum {
34  	XFS_ERR_METADATA,
35  	XFS_ERR_CLASS_MAX,
36  };
37  enum {
38  	XFS_ERR_DEFAULT,
39  	XFS_ERR_EIO,
40  	XFS_ERR_ENOSPC,
41  	XFS_ERR_ENODEV,
42  	XFS_ERR_ERRNO_MAX,
43  };
44  
45  #define XFS_ERR_RETRY_FOREVER	-1
46  
47  /*
48   * Although retry_timeout is in jiffies which is normally an unsigned long,
49   * we limit the retry timeout to 86400 seconds, or one day.  So even a
50   * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
51   * signed lets us store the special "-1" value, meaning retry forever.
52   */
53  struct xfs_error_cfg {
54  	struct xfs_kobj	kobj;
55  	int		max_retries;
56  	long		retry_timeout;	/* in jiffies, -1 = infinite */
57  };
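
/*
 * Illustrative sketch of how a timeout given in seconds maps onto the
 * retry_timeout field above.  The helper name is hypothetical and not part
 * of XFS; the real conversion happens in the error configuration sysfs
 * code.
 */
static inline void
example_error_cfg_set_timeout(struct xfs_error_cfg *cfg, int seconds)
{
	if (seconds < 0)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else
		cfg->retry_timeout = msecs_to_jiffies(seconds * MSEC_PER_SEC);
}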
58  
59  /*
60   * Per-cpu deferred inode inactivation GC lists.
61   */
62  struct xfs_inodegc {
63  	struct xfs_mount	*mp;
64  	struct llist_head	list;
65  	struct delayed_work	work;
66  	int			error;
67  
68  	/* approximate count of inodes in the list */
69  	unsigned int		items;
70  	unsigned int		shrinker_hits;
71  	unsigned int		cpu;
72  };
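
/*
 * Queueing an inode for deferred inactivation roughly follows this pattern
 * (sketch only; the real code in xfs_icache.c also tracks m_inodegc_cpumask,
 * batches the work and cooperates with the shrinker):
 *
 *	struct xfs_inodegc *gc = this_cpu_ptr(mp->m_inodegc);
 *
 *	llist_add(&ip->i_gclist, &gc->list);
 *	WRITE_ONCE(gc->items, gc->items + 1);
 *	mod_delayed_work(mp->m_inodegc_wq, &gc->work, delay);
 */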
73  
74  /*
75   * Container for each type of group, used to look up individual groups
76   * and to describe their geometry.
77   */
78  struct xfs_groups {
79  	struct xarray		xa;
80  
81  	/*
82  	 * Maximum capacity of the group in FSBs.
83  	 *
84  	 * Each group is laid out densely in the daddr space.  For the
85  	 * degenerate case of a pre-rtgroups filesystem, the incore code
86  	 * pretends to have a single zero-block and zero-blklog rtgroup.
87  	 */
88  	uint32_t		blocks;
89  
90  	/*
91  	 * Log(2) of the logical size of each group.
92  	 *
93  	 * Compared to the blocks field above this is rounded up to the next
94  	 * power of two, and thus lays out the xfs_fsblock_t/xfs_rtblock_t
95  	 * space sparsely with a hole from blocks to (1 << blklog) at the end
96  	 * of each group.
97  	 */
98  	uint8_t			blklog;
99  
100  	/*
101  	 * Zoned devices can have gaps between the usable capacity of a zone
102  	 * and the end of the zone in the LBA/daddr address space.  In other
103  	 * words, the hardware equivalent to the RT groups handles the power of 2
104  	 * alignment for us.  In this case the sparse FSB/RTB address space maps
105  	 * 1:1 to the device address space.
106  	 */
107  	bool			has_daddr_gaps;
108  
109  	/*
110  	 * Mask to extract the group-relative block number from a FSB.
111  	 * For a pre-rtgroups filesystem we pretend to have one very large
112  	 * rtgroup, so this mask must be 64-bit.
113  	 */
114  	uint64_t		blkmask;
115  
116  	/*
117  	 * Start of the first group in the device.  This is used to support an
118  	 * RT device following the data device on the same block device for
119  	 * SMR hard drives.
120  	 */
121  	xfs_fsblock_t		start_fsb;
122  };
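
/*
 * Illustrative sketch of how the fields above split a (sparse) filesystem
 * block number into a group number and a group-relative block.  The helper
 * names are hypothetical; the real lookup helpers live elsewhere in libxfs.
 */
static inline uint32_t
example_fsb_to_group(const struct xfs_groups *g, xfs_fsblock_t fsbno)
{
	return fsbno >> g->blklog;
}

static inline uint64_t
example_fsb_to_group_block(const struct xfs_groups *g, xfs_fsblock_t fsbno)
{
	/* 64-bit result because the pre-rtgroups mask covers the whole FSB */
	return fsbno & g->blkmask;
}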
123  
124  struct xfs_freecounter {
125  	/* free blocks for general use: */
126  	struct percpu_counter	count;
127  
128  	/* total reserved blocks: */
129  	uint64_t		res_total;
130  
131  	/* available reserved blocks: */
132  	uint64_t		res_avail;
133  
134  	/* reserved blks @ remount,ro: */
135  	uint64_t		res_saved;
136  };
137  
138  /*
139   * The struct xfs_mount layout is optimised to separate read-mostly variables
140   * from variables that are frequently modified. We put the read-mostly variables
141   * first, then place all the other variables at the end.
142   *
143   * Typically, read-mostly variables are those that are set at mount time and
144   * never changed again, or only change rarely as a result of things like sysfs
145   * knobs being tweaked.
146   */
147  typedef struct xfs_mount {
148  	struct xfs_sb		m_sb;		/* copy of fs superblock */
149  	struct super_block	*m_super;
150  	struct xfs_ail		*m_ail;		/* fs active log item list */
151  	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
152  	struct xfs_buf		*m_rtsb_bp;	/* realtime superblock */
153  	char			*m_rtname;	/* realtime device name */
154  	char			*m_logname;	/* external log device name */
155  	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
156  	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
157  	struct xlog		*m_log;		/* log specific stuff */
158  	struct xfs_inode	*m_rootip;	/* pointer to root directory */
159  	struct xfs_inode	*m_metadirip;	/* ptr to metadata directory */
160  	struct xfs_inode	*m_rtdirip;	/* ptr to realtime metadir */
161  	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
162  	struct xfs_buftarg	*m_ddev_targp;	/* data device */
163  	struct xfs_buftarg	*m_logdev_targp;/* log device */
164  	struct xfs_buftarg	*m_rtdev_targp;	/* rt device */
165  	void __percpu		*m_inodegc;	/* percpu inodegc structures */
166  	struct xfs_mru_cache	*m_filestream;  /* per-mount filestream data */
167  	struct workqueue_struct *m_buf_workqueue;
168  	struct workqueue_struct	*m_unwritten_workqueue;
169  	struct workqueue_struct	*m_reclaim_workqueue;
170  	struct workqueue_struct	*m_sync_workqueue;
171  	struct workqueue_struct *m_blockgc_wq;
172  	struct workqueue_struct *m_inodegc_wq;
173  
174  	int			m_bsize;	/* fs logical block size */
175  	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
176  	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
177  	uint8_t			m_agno_log;	/* log #ag's */
178  	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
179  	int8_t			m_rtxblklog;	/* log2 of rextsize, if possible */
180  
181  	uint			m_blockmask;	/* sb_blocksize-1 */
182  	uint			m_blockwsize;	/* sb_blocksize in words */
183  	/* number of rt extents per rt bitmap block if rtgroups enabled */
184  	unsigned int		m_rtx_per_rbmblock;
185  	uint			m_alloc_mxr[2];	/* max alloc btree records */
186  	uint			m_alloc_mnr[2];	/* min alloc btree records */
187  	uint			m_bmap_dmxr[2];	/* max bmap btree records */
188  	uint			m_bmap_dmnr[2];	/* min bmap btree records */
189  	uint			m_rmap_mxr[2];	/* max rmap btree records */
190  	uint			m_rmap_mnr[2];	/* min rmap btree records */
191  	uint			m_rtrmap_mxr[2]; /* max rtrmap btree records */
192  	uint			m_rtrmap_mnr[2]; /* min rtrmap btree records */
193  	uint			m_refc_mxr[2];	/* max refc btree records */
194  	uint			m_refc_mnr[2];	/* min refc btree records */
195  	uint			m_rtrefc_mxr[2]; /* max rtrefc btree records */
196  	uint			m_rtrefc_mnr[2]; /* min rtrefc btree records */
197  	uint			m_alloc_maxlevels; /* max alloc btree levels */
198  	uint			m_bm_maxlevels[2]; /* max bmap btree levels */
199  	uint			m_rmap_maxlevels; /* max rmap btree levels */
200  	uint			m_rtrmap_maxlevels; /* max rtrmap btree level */
201  	uint			m_refc_maxlevels; /* max refcount btree level */
202  	uint			m_rtrefc_maxlevels; /* max rtrefc btree level */
203  	unsigned int		m_agbtree_maxlevels; /* max level of all AG btrees */
204  	unsigned int		m_rtbtree_maxlevels; /* max level of all rt btrees */
205  	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
206  	uint			m_alloc_set_aside; /* space we can't use */
207  	uint			m_ag_max_usable; /* max space per AG */
208  	int			m_dalign;	/* stripe unit */
209  	int			m_swidth;	/* stripe width */
210  	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
211  	uint			m_allocsize_log;/* min write size log bytes */
212  	uint			m_allocsize_blocks; /* min write size blocks */
213  	int			m_logbufs;	/* number of log buffers */
214  	int			m_logbsize;	/* size of each log buffer */
215  	unsigned int		m_rsumlevels;	/* rt summary levels */
216  	xfs_filblks_t		m_rsumblocks;	/* size of rt summary, FSBs */
217  	int			m_fixedfsid[2];	/* unchanged for life of FS */
218  	uint			m_qflags;	/* quota status flags */
219  	uint64_t		m_features;	/* active filesystem features */
220  	uint64_t		m_low_space[XFS_LOWSP_MAX];
221  	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
222  	uint64_t		m_rtxblkmask;	/* rt extent block mask */
223  	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
224  	struct xfs_trans_resv	m_resv;		/* precomputed res values */
225  						/* low free space thresholds */
226  	unsigned long		m_opstate;	/* dynamic state flags */
227  	bool			m_always_cow;
228  	bool			m_fail_unmount;
229  	bool			m_finobt_nores; /* no per-AG finobt resv. */
230  	bool			m_update_sb;	/* sb needs update in mount */
231  	unsigned int		m_max_open_zones;
232  
233  	/*
234  	 * Bitsets of per-fs metadata that have been checked and/or are sick.
235  	 * Callers must hold m_sb_lock to access these two fields.
236  	 */
237  	uint8_t			m_fs_checked;
238  	uint8_t			m_fs_sick;
239  	/*
240  	 * Bitsets of rt metadata that have been checked and/or are sick.
241  	 * Callers must hold m_sb_lock to access these two fields.
242  	 */
243  	uint8_t			m_rt_checked;
244  	uint8_t			m_rt_sick;
245  
246  	/*
247  	 * End of read-mostly variables. Frequently written variables and locks
248  	 * should be placed below this comment from now on. The first variable
249   * here is marked as cacheline aligned so that it is separated from
250  	 * the read-mostly variables.
251  	 */
252  
253  	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
254  	struct percpu_counter	m_icount;	/* allocated inodes counter */
255  	struct percpu_counter	m_ifree;	/* free inodes counter */
256  
257  	struct xfs_freecounter	m_free[XC_FREE_NR];
258  
259  	/*
260  	 * Count of data device blocks reserved for delayed allocations,
261  	 * including indlen blocks.  Does not include allocated CoW staging
262  	 * extents or anything related to the rt device.
263  	 */
264  	struct percpu_counter	m_delalloc_blks;
265  
266  	/*
267  	 * RT version of the above.
268  	 */
269  	struct percpu_counter	m_delalloc_rtextents;
270  
271  	/*
272  	 * Global count of allocation btree blocks in use across all AGs. Only
273  	 * used when perag reservation is enabled. Helps prevent block
274  	 * reservation from attempting to reserve allocation btree blocks.
275  	 */
276  	atomic64_t		m_allocbt_blks;
277  
278  	struct xfs_groups	m_groups[XG_TYPE_MAX];
279  	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
280  	struct xfs_zone_info	*m_zone_info;	/* zone allocator information */
281  	struct dentry		*m_debugfs;	/* debugfs parent */
282  	struct xfs_kobj		m_kobj;
283  	struct xfs_kobj		m_error_kobj;
284  	struct xfs_kobj		m_error_meta_kobj;
285  	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
286  	struct xstats		m_stats;	/* per-fs stats */
287  #ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
288  	struct xchk_stats	*m_scrub_stats;
289  #endif
290  	struct xfs_kobj		m_zoned_kobj;
291  	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
292  	atomic_t		m_agirotor;	/* last ag dir inode alloced */
293  	atomic_t		m_rtgrotor;	/* last rtgroup rtpicked */
294  
295  	struct mutex		m_metafile_resv_lock;
296  	uint64_t		m_metafile_resv_target;
297  	uint64_t		m_metafile_resv_used;
298  	uint64_t		m_metafile_resv_avail;
299  
300  	/* Memory shrinker to throttle and reprioritize inodegc */
301  	struct shrinker		*m_inodegc_shrinker;
302  	/*
303  	 * Workqueue item so that we can coalesce multiple inode flush attempts
304  	 * into a single flush.
305  	 */
306  	struct work_struct	m_flush_inodes_work;
307  
308  	/*
309  	 * Generation of the filesystem layout.  This is incremented by each
310  	 * growfs, and used by the pNFS server to ensure the client updates
311  	 * its view of the block device once it gets a layout that might
312  	 * reference the newly added blocks.  Does not need to be persistent
313  	 * as long as we only allow file system size increments, but if we
314  	 * ever support shrinks it would have to be persisted in addition
315  	 * to various other kinds of pain inflicted on the pNFS server.
316  	 */
317  	uint32_t		m_generation;
318  	struct mutex		m_growlock;	/* growfs mutex */
319  
320  #ifdef DEBUG
321  	/*
322  	 * Frequency with which errors are injected.  Replaces xfs_etest; the
323  	 * value stored in here is the inverse of the frequency with which the
324  	 * error triggers.  1 = always, 2 = half the time, etc.
325  	 */
326  	unsigned int		*m_errortag;
327  	struct xfs_kobj		m_errortag_kobj;
328  #endif
329  
330  	/* cpus that have inodes queued for inactivation */
331  	struct cpumask		m_inodegc_cpumask;
332  
333  	/* Hook to feed dirent updates to an active online repair. */
334  	struct xfs_hooks	m_dir_update_hooks;
335  } xfs_mount_t;
336  
337  #define M_IGEO(mp)		(&(mp)->m_ino_geo)
338  
339  /*
340   * Flags for m_features.
341   *
342   * These are all the active features in the filesystem, regardless of how
343   * they are configured.
344   */
345  #define XFS_FEAT_ATTR		(1ULL << 0)	/* xattrs present in fs */
346  #define XFS_FEAT_NLINK		(1ULL << 1)	/* 32 bit link counts */
347  #define XFS_FEAT_QUOTA		(1ULL << 2)	/* quota active */
348  #define XFS_FEAT_ALIGN		(1ULL << 3)	/* inode alignment */
349  #define XFS_FEAT_DALIGN		(1ULL << 4)	/* data alignment */
350  #define XFS_FEAT_LOGV2		(1ULL << 5)	/* version 2 logs */
351  #define XFS_FEAT_SECTOR		(1ULL << 6)	/* sector size > 512 bytes */
352  #define XFS_FEAT_EXTFLG		(1ULL << 7)	/* unwritten extents */
353  #define XFS_FEAT_ASCIICI	(1ULL << 8)	/* ASCII only case-insens. */
354  #define XFS_FEAT_LAZYSBCOUNT	(1ULL << 9)	/* Superblk counters */
355  #define XFS_FEAT_ATTR2		(1ULL << 10)	/* dynamic attr fork */
356  #define XFS_FEAT_PARENT		(1ULL << 11)	/* parent pointers */
357  #define XFS_FEAT_PROJID32	(1ULL << 12)	/* 32 bit project id */
358  #define XFS_FEAT_CRC		(1ULL << 13)	/* metadata CRCs */
359  #define XFS_FEAT_V3INODES	(1ULL << 14)	/* Version 3 inodes */
360  #define XFS_FEAT_PQUOTINO	(1ULL << 15)	/* non-shared proj/grp quotas */
361  #define XFS_FEAT_FTYPE		(1ULL << 16)	/* inode type in dir */
362  #define XFS_FEAT_FINOBT		(1ULL << 17)	/* free inode btree */
363  #define XFS_FEAT_RMAPBT		(1ULL << 18)	/* reverse map btree */
364  #define XFS_FEAT_REFLINK	(1ULL << 19)	/* reflinked files */
365  #define XFS_FEAT_SPINODES	(1ULL << 20)	/* sparse inode chunks */
366  #define XFS_FEAT_META_UUID	(1ULL << 21)	/* metadata UUID */
367  #define XFS_FEAT_REALTIME	(1ULL << 22)	/* realtime device present */
368  #define XFS_FEAT_INOBTCNT	(1ULL << 23)	/* inobt block counts */
369  #define XFS_FEAT_BIGTIME	(1ULL << 24)	/* large timestamps */
370  #define XFS_FEAT_NEEDSREPAIR	(1ULL << 25)	/* needs xfs_repair */
371  #define XFS_FEAT_NREXT64	(1ULL << 26)	/* large extent counters */
372  #define XFS_FEAT_EXCHANGE_RANGE	(1ULL << 27)	/* exchange range */
373  #define XFS_FEAT_METADIR	(1ULL << 28)	/* metadata directory tree */
374  #define XFS_FEAT_ZONED		(1ULL << 29)	/* zoned RT device */
375  
376  /* Mount features */
377  #define XFS_FEAT_NOLIFETIME	(1ULL << 47)	/* disable lifetime hints */
378  #define XFS_FEAT_NOATTR2	(1ULL << 48)	/* disable attr2 creation */
379  #define XFS_FEAT_NOALIGN	(1ULL << 49)	/* ignore alignment */
380  #define XFS_FEAT_ALLOCSIZE	(1ULL << 50)	/* user specified allocation size */
381  #define XFS_FEAT_LARGE_IOSIZE	(1ULL << 51)	/* report large preferred
382  						 * I/O size in stat() */
383  #define XFS_FEAT_WSYNC		(1ULL << 52)	/* synchronous metadata ops */
384  #define XFS_FEAT_DIRSYNC	(1ULL << 53)	/* synchronous directory ops */
385  #define XFS_FEAT_DISCARD	(1ULL << 54)	/* discard unused blocks */
386  #define XFS_FEAT_GRPID		(1ULL << 55)	/* group-ID assigned from directory */
387  #define XFS_FEAT_SMALL_INUMS	(1ULL << 56)	/* user wants 32bit inodes */
388  #define XFS_FEAT_IKEEP		(1ULL << 57)	/* keep empty inode clusters*/
389  #define XFS_FEAT_SWALLOC	(1ULL << 58)	/* stripe width allocation */
390  #define XFS_FEAT_FILESTREAMS	(1ULL << 59)	/* use filestreams allocator */
391  #define XFS_FEAT_DAX_ALWAYS	(1ULL << 60)	/* DAX always enabled */
392  #define XFS_FEAT_DAX_NEVER	(1ULL << 61)	/* DAX never enabled */
393  #define XFS_FEAT_NORECOVERY	(1ULL << 62)	/* no recovery - dirty fs */
394  #define XFS_FEAT_NOUUID		(1ULL << 63)	/* ignore uuid during mount */
395  
396  #define __XFS_HAS_FEAT(name, NAME) \
397  static inline bool xfs_has_ ## name (const struct xfs_mount *mp) \
398  { \
399  	return mp->m_features & XFS_FEAT_ ## NAME; \
400  }
401  
402  /* Some features can be added dynamically so they need a set wrapper, too. */
403  #define __XFS_ADD_FEAT(name, NAME) \
404  	__XFS_HAS_FEAT(name, NAME); \
405  static inline void xfs_add_ ## name (struct xfs_mount *mp) \
406  { \
407  	mp->m_features |= XFS_FEAT_ ## NAME; \
408  	xfs_sb_version_add ## name(&mp->m_sb); \
409  }
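
/*
 * For example, __XFS_ADD_FEAT(quota, QUOTA) below expands (modulo
 * whitespace) to:
 *
 *	static inline bool xfs_has_quota(const struct xfs_mount *mp)
 *	{
 *		return mp->m_features & XFS_FEAT_QUOTA;
 *	}
 *	static inline void xfs_add_quota(struct xfs_mount *mp)
 *	{
 *		mp->m_features |= XFS_FEAT_QUOTA;
 *		xfs_sb_version_addquota(&mp->m_sb);
 *	}
 */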
410  
411  /* Superblock features */
412  __XFS_ADD_FEAT(attr, ATTR)
413  __XFS_HAS_FEAT(nlink, NLINK)
414  __XFS_ADD_FEAT(quota, QUOTA)
415  __XFS_HAS_FEAT(dalign, DALIGN)
416  __XFS_HAS_FEAT(sector, SECTOR)
417  __XFS_HAS_FEAT(asciici, ASCIICI)
418  __XFS_HAS_FEAT(parent, PARENT)
419  __XFS_HAS_FEAT(ftype, FTYPE)
420  __XFS_HAS_FEAT(finobt, FINOBT)
421  __XFS_HAS_FEAT(rmapbt, RMAPBT)
422  __XFS_HAS_FEAT(reflink, REFLINK)
423  __XFS_HAS_FEAT(sparseinodes, SPINODES)
424  __XFS_HAS_FEAT(metauuid, META_UUID)
425  __XFS_HAS_FEAT(realtime, REALTIME)
426  __XFS_HAS_FEAT(inobtcounts, INOBTCNT)
427  __XFS_HAS_FEAT(bigtime, BIGTIME)
428  __XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
429  __XFS_HAS_FEAT(large_extent_counts, NREXT64)
430  __XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)
431  __XFS_HAS_FEAT(metadir, METADIR)
432  __XFS_HAS_FEAT(zoned, ZONED)
433  __XFS_HAS_FEAT(nolifetime, NOLIFETIME)
434  
435  static inline bool xfs_has_rtgroups(const struct xfs_mount *mp)
436  {
437  	/* all metadir file systems also allow rtgroups */
438  	return xfs_has_metadir(mp);
439  }
440  
441  static inline bool xfs_has_rtsb(const struct xfs_mount *mp)
442  {
443  	/* all rtgroups filesystems with an rt section have an rtsb */
444  	return xfs_has_rtgroups(mp) &&
445  		xfs_has_realtime(mp) &&
446  		!xfs_has_zoned(mp);
447  }
448  
449  static inline bool xfs_has_rtrmapbt(const struct xfs_mount *mp)
450  {
451  	return xfs_has_rtgroups(mp) && xfs_has_realtime(mp) &&
452  	       xfs_has_rmapbt(mp);
453  }
454  
455  static inline bool xfs_has_rtreflink(const struct xfs_mount *mp)
456  {
457  	return xfs_has_metadir(mp) && xfs_has_realtime(mp) &&
458  	       xfs_has_reflink(mp);
459  }
460  
461  static inline bool xfs_has_nonzoned(const struct xfs_mount *mp)
462  {
463  	return !xfs_has_zoned(mp);
464  }
465  
466  /*
467   * Some features are always on for v5 file systems; this allows the compiler
468   * to eliminate dead code when building without v4 support.
469   */
470  #define __XFS_HAS_V4_FEAT(name, NAME) \
471  static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
472  { \
473  	return !IS_ENABLED(CONFIG_XFS_SUPPORT_V4) || \
474  		(mp->m_features & XFS_FEAT_ ## NAME); \
475  }
476  
477  #define __XFS_ADD_V4_FEAT(name, NAME) \
478  	__XFS_HAS_V4_FEAT(name, NAME); \
479  static inline void xfs_add_ ## name (struct xfs_mount *mp) \
480  { \
481  	if (IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) { \
482  		mp->m_features |= XFS_FEAT_ ## NAME; \
483  		xfs_sb_version_add ## name(&mp->m_sb); \
484  	} \
485  }
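
/*
 * With CONFIG_XFS_SUPPORT_V4 disabled, IS_ENABLED() folds to 0 at compile
 * time, so e.g. xfs_has_crc() below reduces to a constant:
 *
 *	static inline bool xfs_has_crc(struct xfs_mount *mp)
 *	{
 *		return true;
 *	}
 *
 * and any V4-only code guarded by !xfs_has_crc(mp) is discarded as dead code.
 */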
486  
487  __XFS_HAS_V4_FEAT(align, ALIGN)
488  __XFS_HAS_V4_FEAT(logv2, LOGV2)
489  __XFS_HAS_V4_FEAT(extflg, EXTFLG)
490  __XFS_HAS_V4_FEAT(lazysbcount, LAZYSBCOUNT)
491  __XFS_ADD_V4_FEAT(attr2, ATTR2)
492  __XFS_ADD_V4_FEAT(projid32, PROJID32)
493  __XFS_HAS_V4_FEAT(v3inodes, V3INODES)
494  __XFS_HAS_V4_FEAT(crc, CRC)
495  __XFS_HAS_V4_FEAT(pquotino, PQUOTINO)
496  
497  /*
498   * Mount features
499   *
500   * These do not change dynamically - features that can come and go, such as 32
501   * bit inodes and read-only state, are kept as operational state rather than
502   * features.
503   */
504  __XFS_HAS_FEAT(noattr2, NOATTR2)
505  __XFS_HAS_FEAT(noalign, NOALIGN)
506  __XFS_HAS_FEAT(allocsize, ALLOCSIZE)
507  __XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE)
508  __XFS_HAS_FEAT(wsync, WSYNC)
509  __XFS_HAS_FEAT(dirsync, DIRSYNC)
510  __XFS_HAS_FEAT(discard, DISCARD)
511  __XFS_HAS_FEAT(grpid, GRPID)
512  __XFS_HAS_FEAT(small_inums, SMALL_INUMS)
513  __XFS_HAS_FEAT(ikeep, IKEEP)
514  __XFS_HAS_FEAT(swalloc, SWALLOC)
515  __XFS_HAS_FEAT(filestreams, FILESTREAMS)
516  __XFS_HAS_FEAT(dax_always, DAX_ALWAYS)
517  __XFS_HAS_FEAT(dax_never, DAX_NEVER)
518  __XFS_HAS_FEAT(norecovery, NORECOVERY)
519  __XFS_HAS_FEAT(nouuid, NOUUID)
520  
521  /*
522   * Operational mount state flags
523   *
524   * Use these with atomic bit ops only!
525   */
526  #define XFS_OPSTATE_UNMOUNTING		0	/* filesystem is unmounting */
527  #define XFS_OPSTATE_CLEAN		1	/* mount was clean */
528  #define XFS_OPSTATE_SHUTDOWN		2	/* stop all fs operations */
529  #define XFS_OPSTATE_INODE32		3	/* inode32 allocator active */
530  #define XFS_OPSTATE_READONLY		4	/* read-only fs */
531  
532  /*
533   * If set, inactivation worker threads will be scheduled to process queued
534   * inodegc work.  If not, queued inodes remain in memory waiting to be
535   * processed.
536   */
537  #define XFS_OPSTATE_INODEGC_ENABLED	5
538  /*
539   * If set, background speculative prealloc gc worker threads will be scheduled
540   * to process queued blockgc work.  If not, inodes retain their preallocations
541   * until explicitly deleted.
542   */
543  #define XFS_OPSTATE_BLOCKGC_ENABLED	6
544  
545  /* Kernel has logged a warning about pNFS being used on this fs. */
546  #define XFS_OPSTATE_WARNED_PNFS		7
547  /* Kernel has logged a warning about online fsck being used on this fs. */
548  #define XFS_OPSTATE_WARNED_SCRUB	8
549  /* Kernel has logged a warning about shrink being used on this fs. */
550  #define XFS_OPSTATE_WARNED_SHRINK	9
551  /* Kernel has logged a warning about logged xattr updates being used. */
552  #define XFS_OPSTATE_WARNED_LARP		10
553  /* Mount time quotacheck is running */
554  #define XFS_OPSTATE_QUOTACHECK_RUNNING	11
555  /* Do we want to clear log incompat flags? */
556  #define XFS_OPSTATE_UNSET_LOG_INCOMPAT	12
557  /* Filesystem can use logged extended attributes */
558  #define XFS_OPSTATE_USE_LARP		13
559  /* Kernel has logged a warning about blocksize > pagesize on this fs. */
560  #define XFS_OPSTATE_WARNED_LBS		14
561  /* Kernel has logged a warning about exchange-range being used on this fs. */
562  #define XFS_OPSTATE_WARNED_EXCHRANGE	15
563  /* Kernel has logged a warning about parent pointers being used on this fs. */
564  #define XFS_OPSTATE_WARNED_PPTR		16
565  /* Kernel has logged a warning about metadata dirs being used on this fs. */
566  #define XFS_OPSTATE_WARNED_METADIR	17
567  /* Filesystem should use qflags to determine quotaon status */
568  #define XFS_OPSTATE_RESUMING_QUOTAON	18
569  /* Kernel has logged a warning about zoned RT device being used on this fs. */
570  #define XFS_OPSTATE_WARNED_ZONED	19
571  /* (Zoned) GC is in progress */
572  #define XFS_OPSTATE_ZONEGC_RUNNING	20
573  
574  #define __XFS_IS_OPSTATE(name, NAME) \
575  static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
576  { \
577  	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
578  } \
579  static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
580  { \
581  	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
582  } \
583  static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
584  { \
585  	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
586  }
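
/*
 * For example, __XFS_IS_OPSTATE(shutdown, SHUTDOWN) below generates
 * xfs_is_shutdown(), xfs_clear_shutdown() and xfs_set_shutdown().  The
 * set/clear variants return the previous bit value, so a caller can tell
 * whether it was the one to change the state, roughly:
 *
 *	if (xfs_set_shutdown(mp))
 *		return;		(already shut down by someone else)
 */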
587  
588  __XFS_IS_OPSTATE(unmounting, UNMOUNTING)
589  __XFS_IS_OPSTATE(clean, CLEAN)
590  __XFS_IS_OPSTATE(shutdown, SHUTDOWN)
591  __XFS_IS_OPSTATE(inode32, INODE32)
592  __XFS_IS_OPSTATE(readonly, READONLY)
593  __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
594  __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
595  #ifdef CONFIG_XFS_QUOTA
596  __XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
597  __XFS_IS_OPSTATE(resuming_quotaon, RESUMING_QUOTAON)
598  #else
599  static inline bool xfs_is_quotacheck_running(struct xfs_mount *mp)
600  {
601  	return false;
602  }
603  static inline bool xfs_is_resuming_quotaon(struct xfs_mount *mp)
604  {
605  	return false;
606  }
607  static inline void xfs_set_resuming_quotaon(struct xfs_mount *m)
608  {
609  }
610  static inline bool xfs_clear_resuming_quotaon(struct xfs_mount *mp)
611  {
612  	return false;
613  }
614  #endif /* CONFIG_XFS_QUOTA */
615  __XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
616  __XFS_IS_OPSTATE(using_logged_xattrs, USE_LARP)
617  __XFS_IS_OPSTATE(zonegc_running, ZONEGC_RUNNING)
618  
619  static inline bool
620  xfs_should_warn(struct xfs_mount *mp, long nr)
621  {
622  	return !test_and_set_bit(nr, &mp->m_opstate);
623  }
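
/*
 * Typical use of the WARNED_* bits above (sketch; the message text is
 * illustrative): emit a given warning at most once per mount:
 *
 *	if (xfs_should_warn(mp, XFS_OPSTATE_WARNED_SHRINK))
 *		xfs_warn(mp, "EXPERIMENTAL online shrink in use!");
 */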
624  
625  #define XFS_OPSTATE_STRINGS \
626  	{ (1UL << XFS_OPSTATE_UNMOUNTING),		"unmounting" }, \
627  	{ (1UL << XFS_OPSTATE_CLEAN),			"clean" }, \
628  	{ (1UL << XFS_OPSTATE_SHUTDOWN),		"shutdown" }, \
629  	{ (1UL << XFS_OPSTATE_INODE32),			"inode32" }, \
630  	{ (1UL << XFS_OPSTATE_READONLY),		"read_only" }, \
631  	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }, \
632  	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }, \
633  	{ (1UL << XFS_OPSTATE_WARNED_SCRUB),		"wscrub" }, \
634  	{ (1UL << XFS_OPSTATE_WARNED_SHRINK),		"wshrink" }, \
635  	{ (1UL << XFS_OPSTATE_WARNED_LARP),		"wlarp" }, \
636  	{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING),	"quotacheck" }, \
637  	{ (1UL << XFS_OPSTATE_UNSET_LOG_INCOMPAT),	"unset_log_incompat" }, \
638  	{ (1UL << XFS_OPSTATE_USE_LARP),		"logged_xattrs" }
639  
640  /*
641   * Max and min values for mount-option defined I/O
642   * preallocation sizes.
643   */
644  #define XFS_MAX_IO_LOG		30	/* 1G */
645  #define XFS_MIN_IO_LOG		PAGE_SHIFT
646  
647  void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname,
648  		int lnnum);
649  #define xfs_force_shutdown(m,f)	\
650  	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)
651  
652  #define SHUTDOWN_META_IO_ERROR	(1u << 0) /* write attempt to metadata failed */
653  #define SHUTDOWN_LOG_IO_ERROR	(1u << 1) /* write attempt to the log failed */
654  #define SHUTDOWN_FORCE_UMOUNT	(1u << 2) /* shutdown from a forced unmount */
655  #define SHUTDOWN_CORRUPT_INCORE	(1u << 3) /* corrupt in-memory structures */
656  #define SHUTDOWN_CORRUPT_ONDISK	(1u << 4) /* corrupt metadata on device */
657  #define SHUTDOWN_DEVICE_REMOVED	(1u << 5) /* device removed underneath us */
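
/*
 * A caller that detects in-memory corruption would typically do something
 * like the following (sketch):
 *
 *	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 *	return -EFSCORRUPTED;
 */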
658  
659  #define XFS_SHUTDOWN_STRINGS \
660  	{ SHUTDOWN_META_IO_ERROR,	"metadata_io" }, \
661  	{ SHUTDOWN_LOG_IO_ERROR,	"log_io" }, \
662  	{ SHUTDOWN_FORCE_UMOUNT,	"force_umount" }, \
663  	{ SHUTDOWN_CORRUPT_INCORE,	"corruption" }, \
664  	{ SHUTDOWN_DEVICE_REMOVED,	"device_removed" }
665  
666  /*
667   * Flags for xfs_mountfs
668   */
669  #define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */
670  
671  static inline xfs_agnumber_t
672  xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
673  {
674  	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
675  	do_div(ld, mp->m_sb.sb_agblocks);
676  	return (xfs_agnumber_t) ld;
677  }
678  
679  static inline xfs_agblock_t
680  xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
681  {
682  	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
683  	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
684  }
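
/*
 * Worked example for the two helpers above: with 4096-byte blocks
 * (sb_blocklog = 12, so XFS_BB_TO_FSBT() shifts the daddr right by 3) and
 * sb_agblocks = 0x10000, daddr 0x90000 becomes FSB 0x12000, which splits
 * into agno 1 and agbno 0x2000.
 */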
685  
686  extern void	xfs_uuid_table_free(void);
687  uint64_t	xfs_default_resblks(struct xfs_mount *mp,
688  			enum xfs_free_counter ctr);
689  extern int	xfs_mountfs(xfs_mount_t *mp);
690  extern void	xfs_unmountfs(xfs_mount_t *);
691  
692  /*
693   * Deltas for the block count can vary from 1 to very large, but lock contention
694   * only occurs on frequent small block count updates such as in the delayed
695   * allocation path for buffered writes (page-at-a-time updates). Hence we set
696   * a large batch count (1024) to minimise global counter updates except when
697   * we get near to ENOSPC and we have to be very accurate with our updates.
698   */
699  #define XFS_FDBLOCKS_BATCH	1024
700  
701  uint64_t xfs_freecounter_unavailable(struct xfs_mount *mp,
702  		enum xfs_free_counter ctr);
703  
704  /*
705   * Sum up the free counter, but never return negative values.
706   */
707  static inline s64 xfs_sum_freecounter(struct xfs_mount *mp,
708  		enum xfs_free_counter ctr)
709  {
710  	return percpu_counter_sum_positive(&mp->m_free[ctr].count);
711  }
712  
713  /*
714   * Same as above, but does return negative values.  Mostly useful for
715   * special cases like repair and tracing.
716   */
717  static inline s64 xfs_sum_freecounter_raw(struct xfs_mount *mp,
718  		enum xfs_free_counter ctr)
719  {
720  	return percpu_counter_sum(&mp->m_free[ctr].count);
721  }
722  
723  /*
724   * This just provides an estimate without the cpu-local updates; use
725   * xfs_sum_freecounter() for the exact value.
726   */
727  static inline s64 xfs_estimate_freecounter(struct xfs_mount *mp,
728  		enum xfs_free_counter ctr)
729  {
730  	return percpu_counter_read_positive(&mp->m_free[ctr].count);
731  }
732  
733  static inline int xfs_compare_freecounter(struct xfs_mount *mp,
734  		enum xfs_free_counter ctr, s64 rhs, s32 batch)
735  {
736  	return __percpu_counter_compare(&mp->m_free[ctr].count, rhs, batch);
737  }
738  
739  static inline void xfs_set_freecounter(struct xfs_mount *mp,
740  		enum xfs_free_counter ctr, uint64_t val)
741  {
742  	percpu_counter_set(&mp->m_free[ctr].count, val);
743  }
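
/*
 * Illustrative sketch of how a caller might pick a batch size with the
 * helper above, dropping to single-unit accuracy near ENOSPC.  The function
 * name is hypothetical; the real decrement path lives in xfs_mount.c.
 */
static inline s32
example_fdblocks_batch(struct xfs_mount *mp)
{
	if (xfs_compare_freecounter(mp, XC_FREE_BLOCKS,
			2 * XFS_FDBLOCKS_BATCH, XFS_FDBLOCKS_BATCH) < 0)
		return 1;	/* close to ENOSPC, serialise updates */
	return XFS_FDBLOCKS_BATCH;
}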
744  
745  int xfs_dec_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
746  		uint64_t delta, bool rsvd);
747  void xfs_add_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
748  		uint64_t delta);
749  
750  static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
751  		bool reserved)
752  {
753  	return xfs_dec_freecounter(mp, XC_FREE_BLOCKS, delta, reserved);
754  }
755  
756  static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
757  {
758  	xfs_add_freecounter(mp, XC_FREE_BLOCKS, delta);
759  }
760  
761  static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
762  {
763  	return xfs_dec_freecounter(mp, XC_FREE_RTEXTENTS, delta, false);
764  }
765  
766  static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
767  {
768  	xfs_add_freecounter(mp, XC_FREE_RTEXTENTS, delta);
769  }
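
/*
 * Typical usage of the wrappers above (sketch): take blocks out of the free
 * pool up front and give them back if the operation backs out:
 *
 *	error = xfs_dec_fdblocks(mp, resblks, false);
 *	if (error)
 *		return error;	(typically -ENOSPC)
 *	...
 *	xfs_add_fdblocks(mp, resblks);
 */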
770  
771  extern int	xfs_readsb(xfs_mount_t *, int);
772  extern void	xfs_freesb(xfs_mount_t *);
773  extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
774  extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);
775  
776  extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);
777  
778  extern void	xfs_set_low_space_thresholds(struct xfs_mount *);
779  
780  int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
781  			xfs_off_t count_fsb);
782  
783  struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
784  		int error_class, int error);
785  void xfs_force_summary_recalc(struct xfs_mount *mp);
786  int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature);
787  bool xfs_clear_incompat_log_features(struct xfs_mount *mp);
788  void xfs_mod_delalloc(struct xfs_inode *ip, int64_t data_delta,
789  		int64_t ind_delta);
790  static inline void xfs_mod_sb_delalloc(struct xfs_mount *mp, int64_t delta)
791  {
792  	percpu_counter_add(&mp->m_delalloc_blks, delta);
793  }
794  
795  #endif	/* __XFS_MOUNT_H__ */
796