// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
struct xfs_perag;

/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,
};
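
/*
 * Illustrative note (an assumption about usage, not copied from XFS code):
 * these indices select the precomputed thresholds in the m_low_space[] and
 * m_low_rtexts[] arrays of struct xfs_mount below, e.g.
 * m_low_space[XFS_LOWSP_5_PCNT] would hold the 5% threshold filled in by
 * xfs_set_low_space_thresholds().
 */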

/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,
	XFS_ERR_CLASS_MAX,
};
enum {
	XFS_ERR_DEFAULT,
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,
};

#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;
	int		max_retries;
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};
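
/*
 * Worked example of the limit above (illustrative): the one-day cap is
 * 86400 * HZ jiffies, and 2147483647 / 86400 ~= 24855, so any HZ value up
 * to 24855 keeps the cap within a signed 32-bit quantity while leaving -1
 * free as the "retry forever" marker.
 */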

/*
 * Per-cpu deferred inode inactivation GC lists.
 */
struct xfs_inodegc {
	struct xfs_mount	*mp;
	struct llist_head	list;
	struct delayed_work	work;
	int			error;

	/* approximate count of inodes in the list */
	unsigned int		items;
	unsigned int		shrinker_hits;
	unsigned int		cpu;
};

/*
 * Container for each type of group, used to look up individual groups and
 * describe their geometry.
 */
struct xfs_groups {
	struct xarray		xa;

	/*
	 * Maximum capacity of the group in FSBs.
	 *
	 * Each group is laid out densely in the daddr space.  For the
	 * degenerate case of a pre-rtgroups filesystem, the incore rtgroup
	 * pretends to have a zero-block and zero-blklog rtgroup.
	 */
	uint32_t		blocks;

	/*
	 * Log(2) of the logical size of each group.
	 *
	 * Compared to the blocks field above this is rounded up to the next
	 * power of two, and thus lays out the xfs_fsblock_t/xfs_rtblock_t
	 * space sparsely with a hole from blocks to (1 << blklog) at the end
	 * of each group.
	 */
	uint8_t			blklog;

	/*
	 * Zoned devices can have gaps between the usable capacity of a zone
	 * and its end in the LBA/daddr address space.  In other words, the
	 * hardware equivalent to the RT groups already takes care of the
	 * power of 2 alignment for us.  In this case the sparse FSB/RTB
	 * address space maps 1:1 to the device address space.
	 */
	bool			has_daddr_gaps;

	/*
	 * Mask to extract the group-relative block number from a FSB.
	 * For a pre-rtgroups filesystem we pretend to have one very large
	 * rtgroup, so this mask must be 64-bit.
	 */
	uint64_t		blkmask;

	/*
	 * Start of the first group in the device.  This is used to support a
	 * RT device following the data device on the same block device for
	 * SMR hard drives.
	 */
	xfs_fsblock_t		start_fsb;

	/*
	 * Maximum length of an atomic write for files stored in this
	 * collection of allocation groups, in fsblocks.
	 */
	xfs_extlen_t		awu_max;
};
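
/*
 * Illustrative sketch (an assumption, not code copied from XFS): with the
 * sparse power-of-two layout described above, the owning group and the
 * group-relative block number of an xfs_fsblock_t can be recovered with a
 * shift and a mask, roughly:
 *
 *	group_index = fsbno >> groups->blklog;
 *	group_bno   = fsbno &  groups->blkmask;
 */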

struct xfs_freecounter {
	/* free blocks for general use: */
	struct percpu_counter	count;

	/* total reserved blocks: */
	uint64_t		res_total;

	/* available reserved blocks: */
	uint64_t		res_avail;

	/* reserved blks @ remount,ro: */
	uint64_t		res_saved;
};

/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified.  We put the read-mostly
 * variables first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	struct xfs_buf		*m_rtsb_bp;	/* realtime superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_inode	*m_metadirip;	/* ptr to metadata directory */
	struct xfs_inode	*m_rtdirip;	/* ptr to realtime metadir */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	struct xfs_buftarg	*m_ddev_targp;	/* data device */
	struct xfs_buftarg	*m_logdev_targp;/* log device */
	struct xfs_buftarg	*m_rtdev_targp;	/* rt device */
	void __percpu		*m_inodegc;	/* percpu inodegc structures */
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_sync_workqueue;
	struct workqueue_struct	*m_blockgc_wq;
	struct workqueue_struct	*m_inodegc_wq;

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	int8_t			m_rtxblklog;	/* log2 of rextsize, if possible */

	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	/* number of rt extents per rt bitmap block if rtgroups enabled */
	unsigned int		m_rtx_per_rbmblock;
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_rtrmap_mxr[2]; /* max rtrmap btree records */
	uint			m_rtrmap_mnr[2]; /* min rtrmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_rtrefc_mxr[2]; /* max rtrefc btree records */
	uint			m_rtrefc_mnr[2]; /* min rtrefc btree records */
	uint			m_alloc_maxlevels; /* max alloc btree levels */
	uint			m_bm_maxlevels[2]; /* max bmap btree levels */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_rtrmap_maxlevels; /* max rtrmap btree level */
	uint			m_refc_maxlevels; /* max refcount btree level */
	uint			m_rtrefc_maxlevels; /* max rtrefc btree level */
	unsigned int		m_agbtree_maxlevels; /* max level of all AG btrees */
	unsigned int		m_rtbtree_maxlevels; /* max level of all rt btrees */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log;/* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	unsigned int		m_rsumlevels;	/* rt summary levels */
	xfs_filblks_t		m_rsumblocks;	/* size of rt summary, FSBs */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_features;	/* active filesystem features */
	uint64_t		m_low_space[XFS_LOWSP_MAX];
	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
	uint64_t		m_rtxblkmask;	/* rt extent block mask */
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
						/* low free space thresholds */
	unsigned long		m_opstate;	/* dynamic state flags */
	bool			m_always_cow;
	bool			m_fail_unmount;
	bool			m_finobt_nores; /* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */
	unsigned int		m_max_open_zones;
	unsigned int		m_zonegc_low_space;
	struct xfs_mru_cache	*m_zone_cache;	/* Inode to open zone cache */

	/* max_atomic_write mount option value */
	unsigned long long	m_awu_max_bytes;

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables.  Frequently written variables and
	 * locks should be placed below this comment from now on.  The first
	 * variable here is marked as cacheline aligned so that it is
	 * separated from the read-mostly variables.
	 */

	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */

	struct xfs_freecounter	m_free[XC_FREE_NR];

	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;

	/*
	 * RT version of the above.
	 */
	struct percpu_counter	m_delalloc_rtextents;

	/*
	 * Global count of allocation btree blocks in use across all AGs.
	 * Only used when perag reservation is enabled.  Helps prevent block
	 * reservation from attempting to reserve allocation btree blocks.
	 */
	atomic64_t		m_allocbt_blks;

	struct xfs_groups	m_groups[XG_TYPE_MAX];
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct xfs_zone_info	*m_zone_info;	/* zone allocator information */
	struct dentry		*m_debugfs;	/* debugfs parent */
	struct xfs_kobj		m_kobj;
	struct xfs_kobj		m_error_kobj;
	struct xfs_kobj		m_error_meta_kobj;
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
#ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
	struct xchk_stats	*m_scrub_stats;
#endif
	struct xfs_kobj		m_zoned_kobj;
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	atomic_t		m_agirotor;	/* last ag dir inode alloced */
	atomic_t		m_rtgrotor;	/* last rtgroup rtpicked */

	struct mutex		m_metafile_resv_lock;
	uint64_t		m_metafile_resv_target;
	uint64_t		m_metafile_resv_used;
	uint64_t		m_metafile_resv_avail;

	/* Memory shrinker to throttle and reprioritize inodegc */
	struct shrinker		*m_inodegc_shrinker;
	/*
	 * Workqueue item so that we can coalesce multiple inode flush
	 * attempts into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj;
#endif

	/* cpus that have inodes queued for inactivation */
	struct cpumask		m_inodegc_cpumask;

	/* Hook to feed dirent updates to an active online repair. */
	struct xfs_hooks	m_dir_update_hooks;
} xfs_mount_t;

#define M_IGEO(mp)		(&(mp)->m_ino_geo)

/*
 * Flags for m_features.
 *
 * These are all the active features in the filesystem, regardless of how
 * they are configured.
 */
#define XFS_FEAT_ATTR		(1ULL << 0)	/* xattrs present in fs */
#define XFS_FEAT_NLINK		(1ULL << 1)	/* 32 bit link counts */
#define XFS_FEAT_QUOTA		(1ULL << 2)	/* quota active */
#define XFS_FEAT_ALIGN		(1ULL << 3)	/* inode alignment */
#define XFS_FEAT_DALIGN		(1ULL << 4)	/* data alignment */
#define XFS_FEAT_LOGV2		(1ULL << 5)	/* version 2 logs */
#define XFS_FEAT_SECTOR		(1ULL << 6)	/* sector size > 512 bytes */
#define XFS_FEAT_EXTFLG		(1ULL << 7)	/* unwritten extents */
#define XFS_FEAT_ASCIICI	(1ULL << 8)	/* ASCII only case-insens. */
#define XFS_FEAT_LAZYSBCOUNT	(1ULL << 9)	/* Superblk counters */
#define XFS_FEAT_ATTR2		(1ULL << 10)	/* dynamic attr fork */
#define XFS_FEAT_PARENT		(1ULL << 11)	/* parent pointers */
#define XFS_FEAT_PROJID32	(1ULL << 12)	/* 32 bit project id */
#define XFS_FEAT_CRC		(1ULL << 13)	/* metadata CRCs */
#define XFS_FEAT_V3INODES	(1ULL << 14)	/* Version 3 inodes */
#define XFS_FEAT_PQUOTINO	(1ULL << 15)	/* non-shared proj/grp quotas */
#define XFS_FEAT_FTYPE		(1ULL << 16)	/* inode type in dir */
#define XFS_FEAT_FINOBT		(1ULL << 17)	/* free inode btree */
#define XFS_FEAT_RMAPBT		(1ULL << 18)	/* reverse map btree */
#define XFS_FEAT_REFLINK	(1ULL << 19)	/* reflinked files */
#define XFS_FEAT_SPINODES	(1ULL << 20)	/* sparse inode chunks */
#define XFS_FEAT_META_UUID	(1ULL << 21)	/* metadata UUID */
#define XFS_FEAT_REALTIME	(1ULL << 22)	/* realtime device present */
#define XFS_FEAT_INOBTCNT	(1ULL << 23)	/* inobt block counts */
#define XFS_FEAT_BIGTIME	(1ULL << 24)	/* large timestamps */
#define XFS_FEAT_NEEDSREPAIR	(1ULL << 25)	/* needs xfs_repair */
#define XFS_FEAT_NREXT64	(1ULL << 26)	/* large extent counters */
#define XFS_FEAT_EXCHANGE_RANGE	(1ULL << 27)	/* exchange range */
#define XFS_FEAT_METADIR	(1ULL << 28)	/* metadata directory tree */
#define XFS_FEAT_ZONED		(1ULL << 29)	/* zoned RT device */

/* Mount features */
#define XFS_FEAT_NOLIFETIME	(1ULL << 47)	/* disable lifetime hints */
#define XFS_FEAT_NOATTR2	(1ULL << 48)	/* disable attr2 creation */
#define XFS_FEAT_NOALIGN	(1ULL << 49)	/* ignore alignment */
#define XFS_FEAT_ALLOCSIZE	(1ULL << 50)	/* user specified allocation size */
#define XFS_FEAT_LARGE_IOSIZE	(1ULL << 51)	/* report large preferred
						 * I/O size in stat() */
#define XFS_FEAT_WSYNC		(1ULL << 52)	/* synchronous metadata ops */
#define XFS_FEAT_DIRSYNC	(1ULL << 53)	/* synchronous directory ops */
#define XFS_FEAT_DISCARD	(1ULL << 54)	/* discard unused blocks */
#define XFS_FEAT_GRPID		(1ULL << 55)	/* group-ID assigned from directory */
#define XFS_FEAT_SMALL_INUMS	(1ULL << 56)	/* user wants 32bit inodes */
#define XFS_FEAT_IKEEP		(1ULL << 57)	/* keep empty inode clusters */
#define XFS_FEAT_SWALLOC	(1ULL << 58)	/* stripe width allocation */
#define XFS_FEAT_FILESTREAMS	(1ULL << 59)	/* use filestreams allocator */
#define XFS_FEAT_DAX_ALWAYS	(1ULL << 60)	/* DAX always enabled */
#define XFS_FEAT_DAX_NEVER	(1ULL << 61)	/* DAX never enabled */
#define XFS_FEAT_NORECOVERY	(1ULL << 62)	/* no recovery - dirty fs */
#define XFS_FEAT_NOUUID		(1ULL << 63)	/* ignore uuid during mount */

#define __XFS_HAS_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (const struct xfs_mount *mp) \
{ \
	return mp->m_features & XFS_FEAT_ ## NAME; \
}

/* Some features can be added dynamically so they need a set wrapper, too. */
#define __XFS_ADD_FEAT(name, NAME) \
	__XFS_HAS_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
	mp->m_features |= XFS_FEAT_ ## NAME; \
	xfs_sb_version_add ## name(&mp->m_sb); \
}
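
/*
 * Illustrative usage: each invocation below expands to a predicate such as
 * xfs_has_reflink(mp), and __XFS_ADD_FEAT() additionally emits a setter
 * such as xfs_add_quota(mp) that raises the feature bit and updates the
 * incore superblock, e.g.
 *
 *	if (xfs_has_reflink(mp))
 *		... shared extents are supported on this mount ...
 */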

/* Superblock features */
__XFS_ADD_FEAT(attr, ATTR)
__XFS_HAS_FEAT(nlink, NLINK)
__XFS_ADD_FEAT(quota, QUOTA)
__XFS_HAS_FEAT(dalign, DALIGN)
__XFS_HAS_FEAT(sector, SECTOR)
__XFS_HAS_FEAT(asciici, ASCIICI)
__XFS_HAS_FEAT(parent, PARENT)
__XFS_HAS_FEAT(ftype, FTYPE)
__XFS_HAS_FEAT(finobt, FINOBT)
__XFS_HAS_FEAT(rmapbt, RMAPBT)
__XFS_HAS_FEAT(reflink, REFLINK)
__XFS_HAS_FEAT(sparseinodes, SPINODES)
__XFS_HAS_FEAT(metauuid, META_UUID)
__XFS_HAS_FEAT(realtime, REALTIME)
__XFS_HAS_FEAT(inobtcounts, INOBTCNT)
__XFS_HAS_FEAT(bigtime, BIGTIME)
__XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
__XFS_HAS_FEAT(large_extent_counts, NREXT64)
__XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)
__XFS_HAS_FEAT(metadir, METADIR)
__XFS_HAS_FEAT(zoned, ZONED)
__XFS_HAS_FEAT(nolifetime, NOLIFETIME)

static inline bool xfs_has_rtgroups(const struct xfs_mount *mp)
{
	/* all metadir file systems also allow rtgroups */
	return xfs_has_metadir(mp);
}

static inline bool xfs_has_rtsb(const struct xfs_mount *mp)
{
	/* all rtgroups filesystems with an rt section have an rtsb */
	return xfs_has_rtgroups(mp) &&
		xfs_has_realtime(mp) &&
		!xfs_has_zoned(mp);
}

static inline bool xfs_has_rtrmapbt(const struct xfs_mount *mp)
{
	return xfs_has_rtgroups(mp) && xfs_has_realtime(mp) &&
	       xfs_has_rmapbt(mp);
}

static inline bool xfs_has_rtreflink(const struct xfs_mount *mp)
{
	return xfs_has_metadir(mp) && xfs_has_realtime(mp) &&
	       xfs_has_reflink(mp);
}

static inline bool xfs_has_nonzoned(const struct xfs_mount *mp)
{
	return !xfs_has_zoned(mp);
}

static inline bool xfs_can_sw_atomic_write(struct xfs_mount *mp)
{
	return xfs_has_reflink(mp);
}

/*
 * Some features are always on for v5 file systems, allow the compiler to
 * eliminate dead code when building without v4 support.
 */
#define __XFS_HAS_V4_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
	return !IS_ENABLED(CONFIG_XFS_SUPPORT_V4) || \
		(mp->m_features & XFS_FEAT_ ## NAME); \
}

#define __XFS_ADD_V4_FEAT(name, NAME) \
	__XFS_HAS_V4_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
	if (IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) { \
		mp->m_features |= XFS_FEAT_ ## NAME; \
		xfs_sb_version_add ## name(&mp->m_sb); \
	} \
}
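
/*
 * Illustrative note: with CONFIG_XFS_SUPPORT_V4 disabled, the IS_ENABLED()
 * test above makes predicates like xfs_has_crc(mp) compile to constant
 * true, so v4-only fallback paths can be discarded as dead code.
 */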

__XFS_HAS_V4_FEAT(align, ALIGN)
__XFS_HAS_V4_FEAT(logv2, LOGV2)
__XFS_HAS_V4_FEAT(extflg, EXTFLG)
__XFS_HAS_V4_FEAT(lazysbcount, LAZYSBCOUNT)
__XFS_ADD_V4_FEAT(attr2, ATTR2)
__XFS_ADD_V4_FEAT(projid32, PROJID32)
__XFS_HAS_V4_FEAT(v3inodes, V3INODES)
__XFS_HAS_V4_FEAT(crc, CRC)
__XFS_HAS_V4_FEAT(pquotino, PQUOTINO)

/*
 * Mount features
 *
 * These do not change dynamically - features that can come and go, such as 32
 * bit inodes and read-only state, are kept as operational state rather than
 * features.
 */
__XFS_HAS_FEAT(noattr2, NOATTR2)
__XFS_HAS_FEAT(noalign, NOALIGN)
__XFS_HAS_FEAT(allocsize, ALLOCSIZE)
__XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE)
__XFS_HAS_FEAT(wsync, WSYNC)
__XFS_HAS_FEAT(dirsync, DIRSYNC)
__XFS_HAS_FEAT(discard, DISCARD)
__XFS_HAS_FEAT(grpid, GRPID)
__XFS_HAS_FEAT(small_inums, SMALL_INUMS)
__XFS_HAS_FEAT(ikeep, IKEEP)
__XFS_HAS_FEAT(swalloc, SWALLOC)
__XFS_HAS_FEAT(filestreams, FILESTREAMS)
__XFS_HAS_FEAT(dax_always, DAX_ALWAYS)
__XFS_HAS_FEAT(dax_never, DAX_NEVER)
__XFS_HAS_FEAT(norecovery, NORECOVERY)
__XFS_HAS_FEAT(nouuid, NOUUID)

/*
 * Operational mount state flags
 *
 * Use these with atomic bit ops only!
 */
#define XFS_OPSTATE_UNMOUNTING		0	/* filesystem is unmounting */
#define XFS_OPSTATE_CLEAN		1	/* mount was clean */
#define XFS_OPSTATE_SHUTDOWN		2	/* stop all fs operations */
#define XFS_OPSTATE_INODE32		3	/* inode32 allocator active */
#define XFS_OPSTATE_READONLY		4	/* read-only fs */

/*
 * If set, inactivation worker threads will be scheduled to process queued
 * inodegc work.  If not, queued inodes remain in memory waiting to be
 * processed.
 */
#define XFS_OPSTATE_INODEGC_ENABLED	5
/*
 * If set, background speculative prealloc gc worker threads will be scheduled
 * to process queued blockgc work.  If not, inodes retain their preallocations
 * until explicitly deleted.
 */
#define XFS_OPSTATE_BLOCKGC_ENABLED	6

/* Kernel has logged a warning about shrink being used on this fs. */
#define XFS_OPSTATE_WARNED_SHRINK	9
/* Kernel has logged a warning about logged xattr updates being used. */
#define XFS_OPSTATE_WARNED_LARP		10
/* Mount time quotacheck is running */
#define XFS_OPSTATE_QUOTACHECK_RUNNING	11
/* Do we want to clear log incompat flags? */
#define XFS_OPSTATE_UNSET_LOG_INCOMPAT	12
/* Filesystem can use logged extended attributes */
#define XFS_OPSTATE_USE_LARP		13
/* Kernel has logged a warning about blocksize > pagesize on this fs. */
#define XFS_OPSTATE_WARNED_LBS		14
/* Kernel has logged a warning about metadata dirs being used on this fs. */
#define XFS_OPSTATE_WARNED_METADIR	17
/* Filesystem should use qflags to determine quotaon status */
#define XFS_OPSTATE_RESUMING_QUOTAON	18
/* Kernel has logged a warning about zoned RT device being used on this fs. */
#define XFS_OPSTATE_WARNED_ZONED	19
/* (Zoned) GC is in progress */
#define XFS_OPSTATE_ZONEGC_RUNNING	20

#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
}
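
/*
 * Illustrative usage: each invocation below generates three helpers, e.g.
 * for SHUTDOWN:
 *
 *	xfs_is_shutdown(mp)	- test the bit
 *	xfs_set_shutdown(mp)	- atomic test-and-set, returns the old value
 *	xfs_clear_shutdown(mp)	- atomic test-and-clear, returns the old value
 */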

__XFS_IS_OPSTATE(unmounting, UNMOUNTING)
__XFS_IS_OPSTATE(clean, CLEAN)
__XFS_IS_OPSTATE(shutdown, SHUTDOWN)
__XFS_IS_OPSTATE(inode32, INODE32)
__XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
#ifdef CONFIG_XFS_QUOTA
__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
__XFS_IS_OPSTATE(resuming_quotaon, RESUMING_QUOTAON)
#else
static inline bool xfs_is_quotacheck_running(struct xfs_mount *mp)
{
	return false;
}
static inline bool xfs_is_resuming_quotaon(struct xfs_mount *mp)
{
	return false;
}
static inline void xfs_set_resuming_quotaon(struct xfs_mount *m)
{
}
static inline bool xfs_clear_resuming_quotaon(struct xfs_mount *mp)
{
	return false;
}
#endif /* CONFIG_XFS_QUOTA */
__XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
__XFS_IS_OPSTATE(using_logged_xattrs, USE_LARP)
__XFS_IS_OPSTATE(zonegc_running, ZONEGC_RUNNING)

static inline bool
xfs_should_warn(struct xfs_mount *mp, long nr)
{
	return !test_and_set_bit(nr, &mp->m_opstate);
}
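
/*
 * Illustrative warn-once pattern (an assumption about typical callers):
 * pair xfs_should_warn() with one of the XFS_OPSTATE_WARNED_* bits so the
 * message is emitted only the first time, e.g.
 *
 *	if (xfs_should_warn(mp, XFS_OPSTATE_WARNED_SHRINK))
 *		xfs_warn(mp, "...");
 */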

#define XFS_OPSTATE_STRINGS \
	{ (1UL << XFS_OPSTATE_UNMOUNTING),		"unmounting" }, \
	{ (1UL << XFS_OPSTATE_CLEAN),			"clean" }, \
	{ (1UL << XFS_OPSTATE_SHUTDOWN),		"shutdown" }, \
	{ (1UL << XFS_OPSTATE_INODE32),			"inode32" }, \
	{ (1UL << XFS_OPSTATE_READONLY),		"read_only" }, \
	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }, \
	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }, \
	{ (1UL << XFS_OPSTATE_WARNED_SHRINK),		"wshrink" }, \
	{ (1UL << XFS_OPSTATE_WARNED_LARP),		"wlarp" }, \
	{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING),	"quotacheck" }, \
	{ (1UL << XFS_OPSTATE_UNSET_LOG_INCOMPAT),	"unset_log_incompat" }, \
	{ (1UL << XFS_OPSTATE_USE_LARP),		"logged_xattrs" }

/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT

void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)
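
/*
 * Illustrative usage: callers pass one or more SHUTDOWN_* reason flags and
 * the wrapper records the call site via __FILE__ and __LINE__, e.g.
 *
 *	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 */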

#define SHUTDOWN_META_IO_ERROR	(1u << 0) /* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	(1u << 1) /* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	(1u << 2) /* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	(1u << 3) /* corrupt in-memory structures */
#define SHUTDOWN_CORRUPT_ONDISK	(1u << 4) /* corrupt metadata on device */
#define SHUTDOWN_DEVICE_REMOVED	(1u << 5) /* device removed underneath us */

#define XFS_SHUTDOWN_STRINGS \
	{ SHUTDOWN_META_IO_ERROR,	"metadata_io" }, \
	{ SHUTDOWN_LOG_IO_ERROR,	"log_io" }, \
	{ SHUTDOWN_FORCE_UMOUNT,	"force_umount" }, \
	{ SHUTDOWN_CORRUPT_INCORE,	"corruption" }, \
	{ SHUTDOWN_DEVICE_REMOVED,	"device_removed" }

/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */

static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}

static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
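
/*
 * Worked example (illustrative): with 4096-byte blocks, XFS_BB_TO_FSBT()
 * shifts right by blocklog - BBSHIFT = 12 - 9 = 3, so daddr 8008 becomes
 * FSB 1001; with sb_agblocks = 1000, xfs_daddr_to_agno() then returns
 * AG 1 and xfs_daddr_to_agbno() returns block 1 within that AG.
 */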

extern void xfs_uuid_table_free(void);
uint64_t xfs_default_resblks(struct xfs_mount *mp,
		enum xfs_free_counter ctr);
extern int xfs_mountfs(xfs_mount_t *mp);
extern void xfs_unmountfs(xfs_mount_t *);

/*
 * Deltas for the block count can vary from 1 to very large, but lock
 * contention only occurs on frequent small block count updates such as in the
 * delayed allocation path for buffered writes (page-at-a-time updates).
 * Hence we set a large batch count (1024) to minimise global counter updates
 * except when we get near to ENOSPC and we have to be very accurate with our
 * updates.
 */
#define XFS_FDBLOCKS_BATCH	1024
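
/*
 * Illustrative usage (an assumption about typical callers): the batch size
 * is passed to the percpu counter comparison helper defined below, e.g.
 *
 *	if (xfs_compare_freecounter(mp, XC_FREE_BLOCKS, needed,
 *			XFS_FDBLOCKS_BATCH) < 0)
 *		... close to ENOSPC, switch to exact, serialised updates ...
 */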

uint64_t xfs_freecounter_unavailable(struct xfs_mount *mp,
		enum xfs_free_counter ctr);

/*
 * Sum up the freecounter, but never return negative values.
 */
static inline s64 xfs_sum_freecounter(struct xfs_mount *mp,
		enum xfs_free_counter ctr)
{
	return percpu_counter_sum_positive(&mp->m_free[ctr].count);
}

/*
 * Same as above, but does return negative values.  Mostly useful for
 * special cases like repair and tracing.
 */
static inline s64 xfs_sum_freecounter_raw(struct xfs_mount *mp,
		enum xfs_free_counter ctr)
{
	return percpu_counter_sum(&mp->m_free[ctr].count);
}

/*
 * This just provides an estimate without the cpu-local updates; use
 * xfs_sum_freecounter() for the exact value.
 */
static inline s64 xfs_estimate_freecounter(struct xfs_mount *mp,
		enum xfs_free_counter ctr)
{
	return percpu_counter_read_positive(&mp->m_free[ctr].count);
}

static inline int xfs_compare_freecounter(struct xfs_mount *mp,
		enum xfs_free_counter ctr, s64 rhs, s32 batch)
{
	return __percpu_counter_compare(&mp->m_free[ctr].count, rhs, batch);
}

static inline void xfs_set_freecounter(struct xfs_mount *mp,
		enum xfs_free_counter ctr, uint64_t val)
{
	percpu_counter_set(&mp->m_free[ctr].count, val);
}

int xfs_dec_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
		uint64_t delta, bool rsvd);
void xfs_add_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
		uint64_t delta);

static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
		bool reserved)
{
	return xfs_dec_freecounter(mp, XC_FREE_BLOCKS, delta, reserved);
}

static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
{
	xfs_add_freecounter(mp, XC_FREE_BLOCKS, delta);
}

static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
{
	return xfs_dec_freecounter(mp, XC_FREE_RTEXTENTS, delta, false);
}

static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
{
	xfs_add_freecounter(mp, XC_FREE_RTEXTENTS, delta);
}

extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern bool	xfs_fs_writable(struct xfs_mount *mp, int level);
extern int	xfs_sb_validate_fsb_count(struct xfs_sb *, uint64_t);

extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);

extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

int	xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
		xfs_off_t count_fsb);

struct xfs_error_cfg *xfs_error_get_cfg(struct xfs_mount *mp,
		int error_class, int error);
void xfs_force_summary_recalc(struct xfs_mount *mp);
int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature);
bool xfs_clear_incompat_log_features(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_inode *ip, int64_t data_delta,
		int64_t ind_delta);
static inline void xfs_mod_sb_delalloc(struct xfs_mount *mp, int64_t delta)
{
	percpu_counter_add(&mp->m_delalloc_blks, delta);
}

int xfs_set_max_atomic_write_opt(struct xfs_mount *mp,
		unsigned long long new_max_bytes);

#endif /* __XFS_MOUNT_H__ */