xref: /linux/fs/xfs/xfs_mount.h (revision a48373e7d35a89f6f9b39f0d0da9bf158af054ee)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #ifndef __XFS_MOUNT_H__
7 #define	__XFS_MOUNT_H__
8 
9 struct xlog;
10 struct xfs_inode;
11 struct xfs_mru_cache;
12 struct xfs_ail;
13 struct xfs_quotainfo;
14 struct xfs_da_geometry;
15 struct xfs_perag;
16 struct xfs_healthmon;
17 
18 /* dynamic preallocation free space thresholds, 5% down to 1% */
19 enum {
20 	XFS_LOWSP_1_PCNT = 0,
21 	XFS_LOWSP_2_PCNT,
22 	XFS_LOWSP_3_PCNT,
23 	XFS_LOWSP_4_PCNT,
24 	XFS_LOWSP_5_PCNT,
25 	XFS_LOWSP_MAX,
26 };
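
/*
 * These indices select entries in the m_low_space[] and m_low_rtexts[] arrays
 * in struct xfs_mount; XFS_LOWSP_1_PCNT corresponds to 1% of the device and
 * XFS_LOWSP_5_PCNT to 5%.  A minimal sketch of how the thresholds could be
 * filled in (the real work is done by xfs_set_low_space_thresholds(),
 * declared later in this header and implemented in xfs_mount.c):
 *
 *	for (i = 0; i < XFS_LOWSP_MAX; i++)
 *		mp->m_low_space[i] = div_u64(mp->m_sb.sb_dblocks, 100) * (i + 1);
 */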
27 
28 /*
29  * Error Configuration
30  *
31  * Error classes define the subsystem the configuration belongs to.
32  * Error numbers define the errors that are configurable.
33  */
34 enum {
35 	XFS_ERR_METADATA,
36 	XFS_ERR_CLASS_MAX,
37 };
38 enum {
39 	XFS_ERR_DEFAULT,
40 	XFS_ERR_EIO,
41 	XFS_ERR_ENOSPC,
42 	XFS_ERR_ENODEV,
43 	XFS_ERR_ERRNO_MAX,
44 };
45 
46 #define XFS_ERR_RETRY_FOREVER	-1
47 
48 /*
49  * Although retry_timeout is in jiffies, which is normally an unsigned long,
50  * we limit the retry timeout to 86400 seconds, or one day.  So even a
51  * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
52  * signed lets us store the special "-1" value, meaning retry forever.
53  */
54 struct xfs_error_cfg {
55 	struct xfs_kobj	kobj;
56 	int		max_retries;
57 	long		retry_timeout;	/* in jiffies, -1 = infinite */
58 };
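
/*
 * A usage sketch for the retry limits (first_fail_time and retries are
 * illustrative state kept by the caller, not fields of this structure):
 *
 *	if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
 *	    time_after(jiffies, first_fail_time + cfg->retry_timeout))
 *		stop retrying;
 *	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
 *	    retries > cfg->max_retries)
 *		stop retrying;
 */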
59 
60 /*
61  * Per-cpu deferred inode inactivation GC lists.
62  */
63 struct xfs_inodegc {
64 	struct xfs_mount	*mp;
65 	struct llist_head	list;
66 	struct delayed_work	work;
67 	int			error;
68 
69 	/* approximate count of inodes in the list */
70 	unsigned int		items;
71 	unsigned int		shrinker_hits;
72 	unsigned int		cpu;
73 };
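
/*
 * A minimal sketch of how an inode is queued onto the local CPU's list
 * (ignoring the enablement checks and preemption handling done by the real
 * code in xfs_icache.c; i_gclist is the llist_node in struct xfs_inode used
 * for this purpose):
 *
 *	struct xfs_inodegc *gc = this_cpu_ptr(mp->m_inodegc);
 *
 *	llist_add(&ip->i_gclist, &gc->list);
 *	WRITE_ONCE(gc->items, READ_ONCE(gc->items) + 1);
 *	queue_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
 */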
74 
75 /*
76  * Container for each type of group, used to look up individual groups and
77  * to describe their geometry.
78  */
79 struct xfs_groups {
80 	struct xarray		xa;
81 
82 	/*
83 	 * Maximum capacity of the group in FSBs.
84 	 *
85 	 * Each group is laid out densely in the daddr space.  For the
86 	 * degenerate case of a pre-rtgroups filesystem, the incore rtgroup
87 	 * structure pretends to have zero blocks and a zero blklog.
88 	 */
89 	uint32_t		blocks;
90 
91 	/*
92 	 * Log(2) of the logical size of each group.
93 	 *
94 	 * Compared to the blocks field above this is rounded up to the next
95 	 * power of two, and thus lays out the xfs_fsblock_t/xfs_rtblock_t
96 	 * space sparsely with a hole from blocks to (1 << blklog) at the end
97 	 * of each group.
98 	 */
99 	uint8_t			blklog;
100 
101 	/*
102 	 * Zoned devices can have gaps between the usable capacity of a zone and
103 	 * its end in the LBA/daddr address space.  In other words, the hardware
104 	 * equivalent to the RT groups already takes care of the power of 2
105 	 * alignment for us.  In this case the sparse FSB/RTB address space maps
106 	 * 1:1 to the device address space.
107 	 */
108 	bool			has_daddr_gaps;
109 
110 	/*
111 	 * Mask to extract the group-relative block number from a FSB.
112 	 * For a pre-rtgroups filesystem we pretend to have one very large
113 	 * rtgroup, so this mask must be 64-bit.
114 	 */
115 	uint64_t		blkmask;
116 
117 	/*
118 	 * Start of the first group in the device.  This is used to support a
119 	 * RT device following the data device on the same block device for
120 	 * SMR hard drives.
121 	 */
122 	xfs_fsblock_t		start_fsb;
123 
124 	/*
125 	 * Maximum length of an atomic write for files stored in this
126 	 * collection of allocation groups, in fsblocks.
127 	 */
128 	xfs_extlen_t		awu_max;
129 };
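
/*
 * A sketch of the arithmetic the blklog/blkmask fields above enable: splitting
 * a (sparsely laid out) block number into a group number and a group-relative
 * block.  The real conversions go through the xfs_group helpers; fsbno here is
 * an illustrative xfs_fsblock_t:
 *
 *	struct xfs_groups	*g = &mp->m_groups[XG_TYPE_AG];
 *	uint32_t		gno = fsbno >> g->blklog;
 *	uint64_t		gbno = fsbno & g->blkmask;
 */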
130 
131 struct xfs_freecounter {
132 	/* free blocks for general use: */
133 	struct percpu_counter	count;
134 
135 	/* total reserved blocks: */
136 	uint64_t		res_total;
137 
138 	/* available reserved blocks: */
139 	uint64_t		res_avail;
140 
141 	/* reserved blks @ remount,ro: */
142 	uint64_t		res_saved;
143 };
144 
145 /*
146  * The struct xfs_mount layout is optimised to separate read-mostly variables
147  * from variables that are frequently modified. We put the read-mostly variables
148  * first, then place all the other variables at the end.
149  *
150  * Typically, read-mostly variables are those that are set at mount time and
151  * never changed again, or only change rarely as a result of things like sysfs
152  * knobs being tweaked.
153  */
154 typedef struct xfs_mount {
155 	struct xfs_sb		m_sb;		/* copy of fs superblock */
156 	struct super_block	*m_super;
157 	struct xfs_ail		*m_ail;		/* fs active log item list */
158 	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
159 	struct xfs_buf		*m_rtsb_bp;	/* realtime superblock */
160 	char			*m_rtname;	/* realtime device name */
161 	char			*m_logname;	/* external log device name */
162 	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
163 	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
164 	struct xlog		*m_log;		/* log specific stuff */
165 	struct xfs_inode	*m_rootip;	/* pointer to root directory */
166 	struct xfs_inode	*m_metadirip;	/* ptr to metadata directory */
167 	struct xfs_inode	*m_rtdirip;	/* ptr to realtime metadir */
168 	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
169 	struct xfs_buftarg	*m_ddev_targp;	/* data device */
170 	struct xfs_buftarg	*m_logdev_targp;/* log device */
171 	struct xfs_buftarg	*m_rtdev_targp;	/* rt device */
172 	void __percpu		*m_inodegc;	/* percpu inodegc structures */
173 	struct xfs_mru_cache	*m_filestream;  /* per-mount filestream data */
174 	struct workqueue_struct *m_buf_workqueue;
175 	struct workqueue_struct	*m_unwritten_workqueue;
176 	struct workqueue_struct	*m_reclaim_workqueue;
177 	struct workqueue_struct	*m_sync_workqueue;
178 	struct workqueue_struct *m_blockgc_wq;
179 	struct workqueue_struct *m_inodegc_wq;
180 
181 	int			m_bsize;	/* fs logical block size */
182 	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
183 	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
184 	uint8_t			m_agno_log;	/* log #ag's */
185 	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
186 	int8_t			m_rtxblklog;	/* log2 of rextsize, if possible */
187 
188 	uint			m_blockmask;	/* sb_blocksize-1 */
189 	uint			m_blockwsize;	/* sb_blocksize in words */
190 	/* number of rt extents per rt bitmap block if rtgroups enabled */
191 	unsigned int		m_rtx_per_rbmblock;
192 	uint			m_alloc_mxr[2];	/* max alloc btree records */
193 	uint			m_alloc_mnr[2];	/* min alloc btree records */
194 	uint			m_bmap_dmxr[2];	/* max bmap btree records */
195 	uint			m_bmap_dmnr[2];	/* min bmap btree records */
196 	uint			m_rmap_mxr[2];	/* max rmap btree records */
197 	uint			m_rmap_mnr[2];	/* min rmap btree records */
198 	uint			m_rtrmap_mxr[2]; /* max rtrmap btree records */
199 	uint			m_rtrmap_mnr[2]; /* min rtrmap btree records */
200 	uint			m_refc_mxr[2];	/* max refc btree records */
201 	uint			m_refc_mnr[2];	/* min refc btree records */
202 	uint			m_rtrefc_mxr[2]; /* max rtrefc btree records */
203 	uint			m_rtrefc_mnr[2]; /* min rtrefc btree records */
204 	uint			m_alloc_maxlevels; /* max alloc btree levels */
205 	uint			m_bm_maxlevels[2]; /* max bmap btree levels */
206 	uint			m_rmap_maxlevels; /* max rmap btree levels */
207 	uint			m_rtrmap_maxlevels; /* max rtrmap btree level */
208 	uint			m_refc_maxlevels; /* max refcount btree level */
209 	uint			m_rtrefc_maxlevels; /* max rtrefc btree level */
210 	unsigned int		m_agbtree_maxlevels; /* max level of all AG btrees */
211 	unsigned int		m_rtbtree_maxlevels; /* max level of all rt btrees */
212 	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
213 	uint			m_alloc_set_aside; /* space we can't use */
214 	uint			m_ag_max_usable; /* max space per AG */
215 	int			m_dalign;	/* stripe unit */
216 	int			m_swidth;	/* stripe width */
217 	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
218 	uint			m_allocsize_log;/* min write size log bytes */
219 	uint			m_allocsize_blocks; /* min write size blocks */
220 	int			m_logbufs;	/* number of log buffers */
221 	int			m_logbsize;	/* size of each log buffer */
222 	unsigned int		m_rsumlevels;	/* rt summary levels */
223 	xfs_filblks_t		m_rsumblocks;	/* size of rt summary, FSBs */
224 	int			m_fixedfsid[2];	/* unchanged for life of FS */
225 	uint			m_qflags;	/* quota status flags */
226 	uint64_t		m_features;	/* active filesystem features */
227 	uint64_t		m_low_space[XFS_LOWSP_MAX];
228 	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
229 	uint64_t		m_rtxblkmask;	/* rt extent block mask */
230 	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
231 	struct xfs_trans_resv	m_resv;		/* precomputed res values */
232 						/* low free space thresholds */
233 	unsigned long		m_opstate;	/* dynamic state flags */
234 	bool			m_always_cow;
235 	bool			m_fail_unmount;
236 	bool			m_finobt_nores; /* no per-AG finobt resv. */
237 	bool			m_update_sb;	/* sb needs update in mount */
238 	unsigned int		m_max_open_zones;
239 	unsigned int		m_zonegc_low_space;
240 
241 	/* max_atomic_write mount option value */
242 	unsigned long long	m_awu_max_bytes;
243 
244 	/*
245 	 * Bitsets of per-fs metadata that have been checked and/or are sick.
246 	 * Callers must hold m_sb_lock to access these two fields.
247 	 */
248 	uint8_t			m_fs_checked;
249 	uint8_t			m_fs_sick;
250 	/*
251 	 * Bitsets of rt metadata that have been checked and/or are sick.
252 	 * Callers must hold m_sb_lock to access this field.
253 	 */
254 	uint8_t			m_rt_checked;
255 	uint8_t			m_rt_sick;
256 
257 	/*
258 	 * End of read-mostly variables. Frequently written variables and locks
259 	 * should be placed below this comment from now on. The first variable
260 	 * here is marked as cacheline aligned so that it is separated from
261 	 * the read-mostly variables.
262 	 */
263 
264 	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
265 	struct percpu_counter	m_icount;	/* allocated inodes counter */
266 	struct percpu_counter	m_ifree;	/* free inodes counter */
267 
268 	struct xfs_freecounter	m_free[XC_FREE_NR];
269 
270 	/*
271 	 * Count of data device blocks reserved for delayed allocations,
272 	 * including indlen blocks.  Does not include allocated CoW staging
273 	 * extents or anything related to the rt device.
274 	 */
275 	struct percpu_counter	m_delalloc_blks;
276 
277 	/*
278 	 * RT version of the above.
279 	 */
280 	struct percpu_counter	m_delalloc_rtextents;
281 
282 	/*
283 	 * Global count of allocation btree blocks in use across all AGs. Only
284 	 * used when perag reservation is enabled. Helps prevent block
285 	 * reservation from attempting to reserve allocation btree blocks.
286 	 */
287 	atomic64_t		m_allocbt_blks;
288 
289 	struct xfs_groups	m_groups[XG_TYPE_MAX];
290 	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
291 	struct xfs_zone_info	*m_zone_info;	/* zone allocator information */
292 	struct dentry		*m_debugfs;	/* debugfs parent */
293 	struct xfs_kobj		m_kobj;
294 	struct xfs_kobj		m_error_kobj;
295 	struct xfs_kobj		m_error_meta_kobj;
296 	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
297 	struct xstats		m_stats;	/* per-fs stats */
298 #ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
299 	struct xchk_stats	*m_scrub_stats;
300 #endif
301 	struct xfs_kobj		m_zoned_kobj;
302 	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
303 	atomic_t		m_agirotor;	/* last ag dir inode alloced */
304 	atomic_t		m_rtgrotor;	/* last rtgroup rtpicked */
305 
306 	struct mutex		m_metafile_resv_lock;
307 	uint64_t		m_metafile_resv_target;
308 	uint64_t		m_metafile_resv_used;
309 	uint64_t		m_metafile_resv_avail;
310 
311 	/* Memory shrinker to throttle and reprioritize inodegc */
312 	struct shrinker		*m_inodegc_shrinker;
313 	/*
314 	 * Workqueue item so that we can coalesce multiple inode flush attempts
315 	 * into a single flush.
316 	 */
317 	struct work_struct	m_flush_inodes_work;
318 
319 	/*
320 	 * Generation of the filesystem layout.  This is incremented by each
321 	 * growfs, and used by the pNFS server to ensure the client updates
322 	 * its view of the block device once it gets a layout that might
323 	 * reference the newly added blocks.  Does not need to be persistent
324 	 * as long as we only allow file system size increments, but if we
325 	 * ever support shrinks it would have to be persisted in addition
326 	 * to various other kinds of pain inflicted on the pNFS server.
327 	 */
328 	uint32_t		m_generation;
329 	struct mutex		m_growlock;	/* growfs mutex */
330 
331 #ifdef DEBUG
332 	/*
333 	 * Frequency with which errors are injected.  Replaces xfs_etest; the
334 	 * value stored in here is the inverse of the frequency with which the
335 	 * error triggers.  1 = always, 2 = half the time, etc.
336 	 */
337 	unsigned int		*m_errortag;
338 	struct xfs_kobj		m_errortag_kobj;
339 #endif
340 
341 	/* cpus that have inodes queued for inactivation */
342 	struct cpumask		m_inodegc_cpumask;
343 
344 	/* Hook to feed dirent updates to an active online repair. */
345 	struct xfs_hooks	m_dir_update_hooks;
346 
347 	/* Private data referring to a health monitor object. */
348 	struct xfs_healthmon	*m_healthmon;
349 } xfs_mount_t;
350 
351 #define M_IGEO(mp)		(&(mp)->m_ino_geo)
352 
353 /*
354  * Flags for m_features.
355  *
356  * These are all the active features in the filesystem, regardless of how
357  * they are configured.
358  */
359 #define XFS_FEAT_ATTR		(1ULL << 0)	/* xattrs present in fs */
360 #define XFS_FEAT_NLINK		(1ULL << 1)	/* 32 bit link counts */
361 #define XFS_FEAT_QUOTA		(1ULL << 2)	/* quota active */
362 #define XFS_FEAT_ALIGN		(1ULL << 3)	/* inode alignment */
363 #define XFS_FEAT_DALIGN		(1ULL << 4)	/* data alignment */
364 #define XFS_FEAT_LOGV2		(1ULL << 5)	/* version 2 logs */
365 #define XFS_FEAT_SECTOR		(1ULL << 6)	/* sector size > 512 bytes */
366 #define XFS_FEAT_EXTFLG		(1ULL << 7)	/* unwritten extents */
367 #define XFS_FEAT_ASCIICI	(1ULL << 8)	/* ASCII only case-insens. */
368 #define XFS_FEAT_LAZYSBCOUNT	(1ULL << 9)	/* Superblk counters */
369 #define XFS_FEAT_PARENT		(1ULL << 11)	/* parent pointers */
370 #define XFS_FEAT_PROJID32	(1ULL << 12)	/* 32 bit project id */
371 #define XFS_FEAT_CRC		(1ULL << 13)	/* metadata CRCs */
372 #define XFS_FEAT_V3INODES	(1ULL << 14)	/* Version 3 inodes */
373 #define XFS_FEAT_PQUOTINO	(1ULL << 15)	/* non-shared proj/grp quotas */
374 #define XFS_FEAT_FTYPE		(1ULL << 16)	/* inode type in dir */
375 #define XFS_FEAT_FINOBT		(1ULL << 17)	/* free inode btree */
376 #define XFS_FEAT_RMAPBT		(1ULL << 18)	/* reverse map btree */
377 #define XFS_FEAT_REFLINK	(1ULL << 19)	/* reflinked files */
378 #define XFS_FEAT_SPINODES	(1ULL << 20)	/* sparse inode chunks */
379 #define XFS_FEAT_META_UUID	(1ULL << 21)	/* metadata UUID */
380 #define XFS_FEAT_REALTIME	(1ULL << 22)	/* realtime device present */
381 #define XFS_FEAT_INOBTCNT	(1ULL << 23)	/* inobt block counts */
382 #define XFS_FEAT_BIGTIME	(1ULL << 24)	/* large timestamps */
383 #define XFS_FEAT_NEEDSREPAIR	(1ULL << 25)	/* needs xfs_repair */
384 #define XFS_FEAT_NREXT64	(1ULL << 26)	/* large extent counters */
385 #define XFS_FEAT_EXCHANGE_RANGE	(1ULL << 27)	/* exchange range */
386 #define XFS_FEAT_METADIR	(1ULL << 28)	/* metadata directory tree */
387 #define XFS_FEAT_ZONED		(1ULL << 29)	/* zoned RT device */
388 
389 /* Mount features */
390 #define XFS_FEAT_NOLIFETIME	(1ULL << 47)	/* disable lifetime hints */
391 #define XFS_FEAT_NOALIGN	(1ULL << 49)	/* ignore alignment */
392 #define XFS_FEAT_ALLOCSIZE	(1ULL << 50)	/* user specified allocation size */
393 #define XFS_FEAT_LARGE_IOSIZE	(1ULL << 51)	/* report large preferred
394 						 * I/O size in stat() */
395 #define XFS_FEAT_WSYNC		(1ULL << 52)	/* synchronous metadata ops */
396 #define XFS_FEAT_DIRSYNC	(1ULL << 53)	/* synchronous directory ops */
397 #define XFS_FEAT_DISCARD	(1ULL << 54)	/* discard unused blocks */
398 #define XFS_FEAT_GRPID		(1ULL << 55)	/* group-ID assigned from directory */
399 #define XFS_FEAT_SMALL_INUMS	(1ULL << 56)	/* user wants 32bit inodes */
400 #define XFS_FEAT_SWALLOC	(1ULL << 58)	/* stripe width allocation */
401 #define XFS_FEAT_FILESTREAMS	(1ULL << 59)	/* use filestreams allocator */
402 #define XFS_FEAT_DAX_ALWAYS	(1ULL << 60)	/* DAX always enabled */
403 #define XFS_FEAT_DAX_NEVER	(1ULL << 61)	/* DAX never enabled */
404 #define XFS_FEAT_NORECOVERY	(1ULL << 62)	/* no recovery - dirty fs */
405 #define XFS_FEAT_NOUUID		(1ULL << 63)	/* ignore uuid during mount */
406 
407 #define __XFS_HAS_FEAT(name, NAME) \
408 static inline bool xfs_has_ ## name (const struct xfs_mount *mp) \
409 { \
410 	return mp->m_features & XFS_FEAT_ ## NAME; \
411 }
412 
413 /* Some features can be added dynamically so they need a set wrapper, too. */
414 #define __XFS_ADD_FEAT(name, NAME) \
415 	__XFS_HAS_FEAT(name, NAME); \
416 static inline void xfs_add_ ## name (struct xfs_mount *mp) \
417 { \
418 	mp->m_features |= XFS_FEAT_ ## NAME; \
419 	xfs_sb_version_add ## name(&mp->m_sb); \
420 }
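
/*
 * For reference, __XFS_HAS_FEAT(reflink, REFLINK) below expands to:
 *
 *	static inline bool xfs_has_reflink(const struct xfs_mount *mp)
 *	{
 *		return mp->m_features & XFS_FEAT_REFLINK;
 *	}
 *
 * and __XFS_ADD_FEAT() additionally emits an xfs_add_<name>() setter that
 * updates both m_features and the in-memory superblock.
 */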
421 
422 /* Superblock features */
423 __XFS_ADD_FEAT(attr, ATTR)
424 __XFS_HAS_FEAT(nlink, NLINK)
425 __XFS_ADD_FEAT(quota, QUOTA)
426 __XFS_HAS_FEAT(dalign, DALIGN)
427 __XFS_HAS_FEAT(sector, SECTOR)
428 __XFS_HAS_FEAT(asciici, ASCIICI)
429 __XFS_HAS_FEAT(parent, PARENT)
430 __XFS_HAS_FEAT(ftype, FTYPE)
431 __XFS_HAS_FEAT(finobt, FINOBT)
432 __XFS_HAS_FEAT(rmapbt, RMAPBT)
433 __XFS_HAS_FEAT(reflink, REFLINK)
434 __XFS_HAS_FEAT(sparseinodes, SPINODES)
435 __XFS_HAS_FEAT(metauuid, META_UUID)
436 __XFS_HAS_FEAT(realtime, REALTIME)
437 __XFS_HAS_FEAT(inobtcounts, INOBTCNT)
438 __XFS_HAS_FEAT(bigtime, BIGTIME)
439 __XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
440 __XFS_HAS_FEAT(large_extent_counts, NREXT64)
441 __XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)
442 __XFS_HAS_FEAT(metadir, METADIR)
443 __XFS_HAS_FEAT(zoned, ZONED)
444 __XFS_HAS_FEAT(nolifetime, NOLIFETIME)
445 
446 static inline bool xfs_has_rtgroups(const struct xfs_mount *mp)
447 {
448 	/* all metadir file systems also allow rtgroups */
449 	return xfs_has_metadir(mp);
450 }
451 
452 static inline bool xfs_has_rtsb(const struct xfs_mount *mp)
453 {
454 	/* all rtgroups filesystems with an rt section have an rtsb */
455 	return xfs_has_rtgroups(mp) &&
456 		xfs_has_realtime(mp) &&
457 		!xfs_has_zoned(mp);
458 }
459 
460 static inline bool xfs_has_rtrmapbt(const struct xfs_mount *mp)
461 {
462 	return xfs_has_rtgroups(mp) && xfs_has_realtime(mp) &&
463 	       xfs_has_rmapbt(mp);
464 }
465 
466 static inline bool xfs_has_rtreflink(const struct xfs_mount *mp)
467 {
468 	return xfs_has_metadir(mp) && xfs_has_realtime(mp) &&
469 	       xfs_has_reflink(mp);
470 }
471 
472 static inline bool xfs_has_nonzoned(const struct xfs_mount *mp)
473 {
474 	return !xfs_has_zoned(mp);
475 }
476 
477 static inline bool xfs_can_sw_atomic_write(struct xfs_mount *mp)
478 {
479 	return xfs_has_reflink(mp);
480 }
481 
482 /*
483  * Some features are always on for v5 file systems; allow the compiler to
484  * eliminate dead code when building without v4 support.
485  */
486 #define __XFS_HAS_V4_FEAT(name, NAME) \
487 static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
488 { \
489 	return !IS_ENABLED(CONFIG_XFS_SUPPORT_V4) || \
490 		(mp->m_features & XFS_FEAT_ ## NAME); \
491 }
492 
493 #define __XFS_ADD_V4_FEAT(name, NAME) \
494 	__XFS_HAS_V4_FEAT(name, NAME); \
495 static inline void xfs_add_ ## name (struct xfs_mount *mp) \
496 { \
497 	if (IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) { \
498 		mp->m_features |= XFS_FEAT_ ## NAME; \
499 		xfs_sb_version_add ## name(&mp->m_sb); \
500 	} \
501 }
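
/*
 * When CONFIG_XFS_SUPPORT_V4 is disabled, IS_ENABLED() evaluates to 0 at
 * compile time, so e.g. __XFS_HAS_V4_FEAT(crc, CRC) below reduces to:
 *
 *	static inline bool xfs_has_crc(struct xfs_mount *mp)
 *	{
 *		return true;
 *	}
 *
 * which lets the compiler discard any V4-only code guarded by !xfs_has_crc().
 */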
502 
503 __XFS_HAS_V4_FEAT(align, ALIGN)
504 __XFS_HAS_V4_FEAT(logv2, LOGV2)
505 __XFS_HAS_V4_FEAT(extflg, EXTFLG)
506 __XFS_HAS_V4_FEAT(lazysbcount, LAZYSBCOUNT)
507 __XFS_ADD_V4_FEAT(projid32, PROJID32)
508 __XFS_HAS_V4_FEAT(v3inodes, V3INODES)
509 __XFS_HAS_V4_FEAT(crc, CRC)
510 __XFS_HAS_V4_FEAT(pquotino, PQUOTINO)
511 
512 static inline void xfs_add_attr2(struct xfs_mount *mp)
513 {
514 	if (IS_ENABLED(CONFIG_XFS_SUPPORT_V4))
515 		xfs_sb_version_addattr2(&mp->m_sb);
516 }
517 
518 /*
519  * Mount features
520  *
521  * These do not change dynamically - features that can come and go, such as 32
522  * bit inodes and read-only state, are kept as operational state rather than
523  * features.
524  */
525 __XFS_HAS_FEAT(noalign, NOALIGN)
526 __XFS_HAS_FEAT(allocsize, ALLOCSIZE)
527 __XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE)
528 __XFS_HAS_FEAT(wsync, WSYNC)
529 __XFS_HAS_FEAT(dirsync, DIRSYNC)
530 __XFS_HAS_FEAT(discard, DISCARD)
531 __XFS_HAS_FEAT(grpid, GRPID)
532 __XFS_HAS_FEAT(small_inums, SMALL_INUMS)
533 __XFS_HAS_FEAT(swalloc, SWALLOC)
534 __XFS_HAS_FEAT(filestreams, FILESTREAMS)
535 __XFS_HAS_FEAT(dax_always, DAX_ALWAYS)
536 __XFS_HAS_FEAT(dax_never, DAX_NEVER)
537 __XFS_HAS_FEAT(norecovery, NORECOVERY)
538 __XFS_HAS_FEAT(nouuid, NOUUID)
539 
540 /*
541  * Operational mount state flags
542  *
543  * Use these with atomic bit ops only!
544  */
545 #define XFS_OPSTATE_UNMOUNTING		0	/* filesystem is unmounting */
546 #define XFS_OPSTATE_CLEAN		1	/* mount was clean */
547 #define XFS_OPSTATE_SHUTDOWN		2	/* stop all fs operations */
548 #define XFS_OPSTATE_INODE32		3	/* inode32 allocator active */
549 #define XFS_OPSTATE_READONLY		4	/* read-only fs */
550 
551 /*
552  * If set, inactivation worker threads will be scheduled to process queued
553  * inodegc work.  If not, queued inodes remain in memory waiting to be
554  * processed.
555  */
556 #define XFS_OPSTATE_INODEGC_ENABLED	5
557 /*
558  * If set, background speculative prealloc gc worker threads will be scheduled
559  * to process queued blockgc work.  If not, inodes retain their preallocations
560  * until explicitly deleted.
561  */
562 #define XFS_OPSTATE_BLOCKGC_ENABLED	6
563 
564 /* Kernel has logged a warning about shrink being used on this fs. */
565 #define XFS_OPSTATE_WARNED_SHRINK	9
566 /* Kernel has logged a warning about logged xattr updates being used. */
567 #define XFS_OPSTATE_WARNED_LARP		10
568 /* Mount time quotacheck is running */
569 #define XFS_OPSTATE_QUOTACHECK_RUNNING	11
570 /* Do we want to clear log incompat flags? */
571 #define XFS_OPSTATE_UNSET_LOG_INCOMPAT	12
572 /* Filesystem can use logged extended attributes */
573 #define XFS_OPSTATE_USE_LARP		13
574 /* Kernel has logged a warning about blocksize > pagesize on this fs. */
575 #define XFS_OPSTATE_WARNED_LBS		14
576 /* Kernel has logged a warning about metadata dirs being used on this fs. */
577 #define XFS_OPSTATE_WARNED_METADIR	17
578 /* Filesystem should use qflags to determine quotaon status */
579 #define XFS_OPSTATE_RESUMING_QUOTAON	18
580 /* Kernel has logged a warning about zoned RT device being used on this fs. */
581 #define XFS_OPSTATE_WARNED_ZONED	19
582 /* (Zoned) GC is in progress */
583 #define XFS_OPSTATE_ZONEGC_RUNNING	20
584 
585 #define __XFS_IS_OPSTATE(name, NAME) \
586 static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
587 { \
588 	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
589 } \
590 static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
591 { \
592 	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
593 } \
594 static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
595 { \
596 	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
597 }
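
/*
 * Each __XFS_IS_OPSTATE() invocation below generates an xfs_is_<name>() test
 * plus xfs_set_<name>()/xfs_clear_<name>() helpers built on atomic bitops.
 * A typical caller pattern elsewhere in xfs looks like:
 *
 *	if (xfs_is_shutdown(mp))
 *		return -EIO;
 */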
598 
599 __XFS_IS_OPSTATE(unmounting, UNMOUNTING)
600 __XFS_IS_OPSTATE(clean, CLEAN)
601 __XFS_IS_OPSTATE(shutdown, SHUTDOWN)
602 __XFS_IS_OPSTATE(inode32, INODE32)
603 __XFS_IS_OPSTATE(readonly, READONLY)
604 __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
605 __XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
606 #ifdef CONFIG_XFS_QUOTA
607 __XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
608 __XFS_IS_OPSTATE(resuming_quotaon, RESUMING_QUOTAON)
609 #else
610 static inline bool xfs_is_quotacheck_running(struct xfs_mount *mp)
611 {
612 	return false;
613 }
614 static inline bool xfs_is_resuming_quotaon(struct xfs_mount *mp)
615 {
616 	return false;
617 }
618 static inline void xfs_set_resuming_quotaon(struct xfs_mount *m)
619 {
620 }
621 static inline bool xfs_clear_resuming_quotaon(struct xfs_mount *mp)
622 {
623 	return false;
624 }
625 #endif /* CONFIG_XFS_QUOTA */
626 __XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
627 __XFS_IS_OPSTATE(using_logged_xattrs, USE_LARP)
628 __XFS_IS_OPSTATE(zonegc_running, ZONEGC_RUNNING)
629 
630 static inline bool
631 xfs_should_warn(struct xfs_mount *mp, long nr)
632 {
633 	return !test_and_set_bit(nr, &mp->m_opstate);
634 }
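
/*
 * xfs_should_warn() returns true only for the first caller to set the given
 * bit, giving a cheap warn-once-per-mount idiom.  A sketch (the message is
 * illustrative and xfs_warn() comes from xfs_message.h, not this header):
 *
 *	if (xfs_should_warn(mp, XFS_OPSTATE_WARNED_SHRINK))
 *		xfs_warn(mp, "shrink is experimental on this filesystem");
 */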
635 
636 #define XFS_OPSTATE_STRINGS \
637 	{ (1UL << XFS_OPSTATE_UNMOUNTING),		"unmounting" }, \
638 	{ (1UL << XFS_OPSTATE_CLEAN),			"clean" }, \
639 	{ (1UL << XFS_OPSTATE_SHUTDOWN),		"shutdown" }, \
640 	{ (1UL << XFS_OPSTATE_INODE32),			"inode32" }, \
641 	{ (1UL << XFS_OPSTATE_READONLY),		"read_only" }, \
642 	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }, \
643 	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }, \
644 	{ (1UL << XFS_OPSTATE_WARNED_SHRINK),		"wshrink" }, \
645 	{ (1UL << XFS_OPSTATE_WARNED_LARP),		"wlarp" }, \
646 	{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING),	"quotacheck" }, \
647 	{ (1UL << XFS_OPSTATE_UNSET_LOG_INCOMPAT),	"unset_log_incompat" }, \
648 	{ (1UL << XFS_OPSTATE_USE_LARP),		"logged_xattrs" }
649 
650 /*
651  * Max and min values for mount-option defined I/O
652  * preallocation sizes.
653  */
654 #define XFS_MAX_IO_LOG		30	/* 1G */
655 #define XFS_MIN_IO_LOG		PAGE_SHIFT
656 
657 void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname,
658 		int lnnum);
659 #define xfs_force_shutdown(m,f)	\
660 	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)
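
/*
 * A typical shutdown call site passes one of the SHUTDOWN_* reason flags
 * defined below, for example:
 *
 *	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
 */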
661 
662 #define SHUTDOWN_META_IO_ERROR	(1u << 0) /* write attempt to metadata failed */
663 #define SHUTDOWN_LOG_IO_ERROR	(1u << 1) /* write attempt to the log failed */
664 #define SHUTDOWN_FORCE_UMOUNT	(1u << 2) /* shutdown from a forced unmount */
665 #define SHUTDOWN_CORRUPT_INCORE	(1u << 3) /* corrupt in-memory structures */
666 #define SHUTDOWN_CORRUPT_ONDISK	(1u << 4)  /* corrupt metadata on device */
667 #define SHUTDOWN_DEVICE_REMOVED	(1u << 5) /* device removed underneath us */
668 
669 #define XFS_SHUTDOWN_STRINGS \
670 	{ SHUTDOWN_META_IO_ERROR,	"metadata_io" }, \
671 	{ SHUTDOWN_LOG_IO_ERROR,	"log_io" }, \
672 	{ SHUTDOWN_FORCE_UMOUNT,	"force_umount" }, \
673 	{ SHUTDOWN_CORRUPT_INCORE,	"corruption" }, \
674 	{ SHUTDOWN_DEVICE_REMOVED,	"device_removed" }
675 
676 /*
677  * Flags for xfs_mountfs
678  */
679 #define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */
680 
681 static inline xfs_agnumber_t
682 xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
683 {
684 	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
685 	do_div(ld, mp->m_sb.sb_agblocks);
686 	return (xfs_agnumber_t) ld;
687 }
688 
689 static inline xfs_agblock_t
690 xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
691 {
692 	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
693 	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
694 }
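
/*
 * Worked example for the two helpers above: with a 4096-byte block size
 * (m_blkbb_log = 12 - BBSHIFT = 3) and sb_agblocks = 1000, daddr 16384
 * converts to fsblock 2048, giving agno 2 and agbno 48.
 */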
695 
696 extern void	xfs_uuid_table_free(void);
697 uint64_t	xfs_default_resblks(struct xfs_mount *mp,
698 			enum xfs_free_counter ctr);
699 extern int	xfs_mountfs(xfs_mount_t *mp);
700 extern void	xfs_unmountfs(xfs_mount_t *);
701 
702 /*
703  * Deltas for the block count can vary from 1 to very large, but lock contention
704  * only occurs on frequent small block count updates such as in the delayed
705  * allocation path for buffered writes (page-at-a-time updates). Hence we set
706  * a large batch count (1024) to minimise global counter updates except when
707  * we get near to ENOSPC and we have to be very accurate with our updates.
708  */
709 #define XFS_FDBLOCKS_BATCH	1024
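
/*
 * A sketch of how the batch size interacts with the counter helpers defined
 * below: the large batch keeps comparisons cheap while plenty of space
 * remains, and callers drop to an exact sum only when the estimate says we
 * are close to ENOSPC (the thresholds and "needed" are illustrative):
 *
 *	if (xfs_compare_freecounter(mp, XC_FREE_BLOCKS,
 *			2 * XFS_FDBLOCKS_BATCH, XFS_FDBLOCKS_BATCH) < 0 &&
 *	    xfs_sum_freecounter(mp, XC_FREE_BLOCKS) < needed)
 *		return -ENOSPC;
 */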
710 
711 uint64_t xfs_freecounter_unavailable(struct xfs_mount *mp,
712 		enum xfs_free_counter ctr);
713 
714 /*
715  * Sum up the freecount, but never return negative values.
716  */
717 static inline s64 xfs_sum_freecounter(struct xfs_mount *mp,
718 		enum xfs_free_counter ctr)
719 {
720 	return percpu_counter_sum_positive(&mp->m_free[ctr].count);
721 }
722 
723 /*
724  * Same as above, but does return negative values.  Mostly useful for
725  * special cases like repair and tracing.
726  */
727 static inline s64 xfs_sum_freecounter_raw(struct xfs_mount *mp,
728 		enum xfs_free_counter ctr)
729 {
730 	return percpu_counter_sum(&mp->m_free[ctr].count);
731 }
732 
733 /*
734  * This just provides an estimate without the cpu-local updates; use
735  * xfs_sum_freecounter for the exact value.
736  */
737 static inline s64 xfs_estimate_freecounter(struct xfs_mount *mp,
738 		enum xfs_free_counter ctr)
739 {
740 	return percpu_counter_read_positive(&mp->m_free[ctr].count);
741 }
742 
743 static inline int xfs_compare_freecounter(struct xfs_mount *mp,
744 		enum xfs_free_counter ctr, s64 rhs, s32 batch)
745 {
746 	return __percpu_counter_compare(&mp->m_free[ctr].count, rhs, batch);
747 }
748 
749 static inline void xfs_set_freecounter(struct xfs_mount *mp,
750 		enum xfs_free_counter ctr, uint64_t val)
751 {
752 	percpu_counter_set(&mp->m_free[ctr].count, val);
753 }
754 
755 int xfs_dec_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
756 		uint64_t delta, bool rsvd);
757 void xfs_add_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
758 		uint64_t delta);
759 
760 static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
761 		bool reserved)
762 {
763 	return xfs_dec_freecounter(mp, XC_FREE_BLOCKS, delta, reserved);
764 }
765 
766 static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
767 {
768 	xfs_add_freecounter(mp, XC_FREE_BLOCKS, delta);
769 }
770 
771 static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
772 {
773 	return xfs_dec_freecounter(mp, XC_FREE_RTEXTENTS, delta, false);
774 }
775 
776 static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
777 {
778 	xfs_add_freecounter(mp, XC_FREE_RTEXTENTS, delta);
779 }
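
/*
 * A minimal reservation sketch using the wrappers above; delta is the number
 * of blocks wanted and the error handling is illustrative:
 *
 *	error = xfs_dec_fdblocks(mp, delta, false);
 *	if (error)
 *		return error;	(typically -ENOSPC)
 *	...
 *	xfs_add_fdblocks(mp, delta);	(undo if the operation backs out)
 */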
780 
781 extern int	xfs_readsb(xfs_mount_t *, int);
782 extern void	xfs_freesb(xfs_mount_t *);
783 extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
784 extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);
785 
786 extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);
787 
788 extern void	xfs_set_low_space_thresholds(struct xfs_mount *);
789 
790 int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
791 			xfs_off_t count_fsb);
792 
793 struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
794 		int error_class, int error);
795 void xfs_force_summary_recalc(struct xfs_mount *mp);
796 int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature);
797 bool xfs_clear_incompat_log_features(struct xfs_mount *mp);
798 void xfs_mod_delalloc(struct xfs_inode *ip, int64_t data_delta,
799 		int64_t ind_delta);
800 static inline void xfs_mod_sb_delalloc(struct xfs_mount *mp, int64_t delta)
801 {
802 	percpu_counter_add(&mp->m_delalloc_blks, delta);
803 }
804 
805 int xfs_set_max_atomic_write_opt(struct xfs_mount *mp,
806 		unsigned long long new_max_bytes);
807 
808 static inline struct xfs_buftarg *
809 xfs_group_type_buftarg(
810 	struct xfs_mount	*mp,
811 	enum xfs_group_type	type)
812 {
813 	switch (type) {
814 	case XG_TYPE_AG:
815 		return mp->m_ddev_targp;
816 	case XG_TYPE_RTG:
817 		return mp->m_rtdev_targp;
818 	default:
819 		ASSERT(0);
820 		break;
821 	}
822 	return NULL;
823 }
824 
825 #endif	/* __XFS_MOUNT_H__ */
826