// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SHARED_H__
#define __XFS_SHARED_H__

/*
 * Definitions shared between kernel and userspace that don't fit into any other
 * header file that is shared with userspace.
 */
struct xfs_ifork;
struct xfs_buf;
struct xfs_buf_ops;
struct xfs_mount;
struct xfs_trans;
struct xfs_inode;

/*
 * Buffer verifier operations are widely used, including userspace tools
 */
extern const struct xfs_buf_ops xfs_agf_buf_ops;
extern const struct xfs_buf_ops xfs_agfl_buf_ops;
extern const struct xfs_buf_ops xfs_agi_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
extern const struct xfs_buf_ops xfs_bnobt_buf_ops;
extern const struct xfs_buf_ops xfs_cntbt_buf_ops;
extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
extern const struct xfs_buf_ops xfs_dquot_buf_ops;
extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
extern const struct xfs_buf_ops xfs_finobt_buf_ops;
extern const struct xfs_buf_ops xfs_inobt_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ops;
extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
extern const struct xfs_buf_ops xfs_refcountbt_buf_ops;
extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
extern const struct xfs_buf_ops xfs_rtbitmap_buf_ops;
extern const struct xfs_buf_ops xfs_rtsummary_buf_ops;
extern const struct xfs_buf_ops xfs_rtbuf_ops;
extern const struct xfs_buf_ops xfs_rtsb_buf_ops;
extern const struct xfs_buf_ops xfs_rtrefcountbt_buf_ops;
extern const struct xfs_buf_ops xfs_rtrmapbt_buf_ops;
extern const struct xfs_buf_ops xfs_sb_buf_ops;
extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
extern const struct xfs_buf_ops xfs_symlink_buf_ops;

/* btree ops */
extern const struct xfs_btree_ops xfs_bnobt_ops;
extern const struct xfs_btree_ops xfs_cntbt_ops;
extern const struct xfs_btree_ops xfs_inobt_ops;
extern const struct xfs_btree_ops xfs_finobt_ops;
extern const struct xfs_btree_ops xfs_bmbt_ops;
extern const struct xfs_btree_ops xfs_refcountbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_mem_ops;
extern const struct xfs_btree_ops xfs_rtrmapbt_ops;
extern const struct xfs_btree_ops xfs_rtrmapbt_mem_ops;
extern const struct xfs_btree_ops xfs_rtrefcountbt_ops;

/*
 * Predicates to identify a btree type from its ops vector.  Each btree type
 * has exactly one ops table, so a simple pointer comparison suffices.
 */
static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_bnobt_ops;
}

static inline bool xfs_btree_is_cnt(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_cntbt_ops;
}

static inline bool xfs_btree_is_bmap(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_bmbt_ops;
}

static inline bool xfs_btree_is_ino(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_inobt_ops;
}

static inline bool xfs_btree_is_fino(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_finobt_ops;
}

static inline bool xfs_btree_is_refcount(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_refcountbt_ops;
}

static inline bool xfs_btree_is_rmap(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_rmapbt_ops;
}

#ifdef CONFIG_XFS_BTREE_IN_MEM
static inline bool xfs_btree_is_mem_rmap(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_rmapbt_mem_ops;
}

static inline bool xfs_btree_is_mem_rtrmap(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_rtrmapbt_mem_ops;
}
#else
/* In-memory btrees are compiled out; the mem ops tables do not exist. */
# define xfs_btree_is_mem_rmap(...)	(false)
# define xfs_btree_is_mem_rtrmap(...)	(false)
#endif

static inline bool xfs_btree_is_rtrmap(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_rtrmapbt_ops;
}

static inline bool xfs_btree_is_rtrefcount(const struct xfs_btree_ops *ops)
{
	return ops == &xfs_rtrefcountbt_ops;
}

/* log size calculation functions */
int	xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int	xfs_log_calc_minimum_size(struct xfs_mount *);

struct xfs_trans_res;
void	xfs_log_get_max_trans_res(struct xfs_mount *mp,
				  struct xfs_trans_res *max_resp);

/*
 * Values for t_flags.
 */
/* Transaction needs to be logged */
#define XFS_TRANS_DIRTY			(1u << 0)
/* Superblock is dirty and needs to be logged */
#define XFS_TRANS_SB_DIRTY		(1u << 1)
/* Transaction took a permanent log reservation */
#define XFS_TRANS_PERM_LOG_RES		(1u << 2)
/* Synchronous transaction commit needed */
#define XFS_TRANS_SYNC			(1u << 3)
/* Transaction can use reserve block pool */
#define XFS_TRANS_RESERVE		(1u << 4)
/* Transaction should avoid VFS level superblock write accounting */
#define XFS_TRANS_NO_WRITECOUNT		(1u << 5)
/* Transaction has freed blocks returned to its reservation */
#define XFS_TRANS_RES_FDBLKS		(1u << 6)
/* Transaction contains an intent done log item */
#define XFS_TRANS_HAS_INTENT_DONE	(1u << 7)
/*
 * LOWMODE is used by the allocator to activate the lowspace algorithm - when
 * free space is running low the extent allocator may choose to allocate an
 * extent from an AG without leaving sufficient space for a btree split when
 * inserting the new extent. In this case the allocator will enable the
 * lowspace algorithm which is supposed to allow further allocations (such as
 * btree splits and newroots) to allocate from sequential AGs. In order to
 * avoid locking AGs out of order the lowspace algorithm will start searching
 * for free space from AG 0. If the correct transaction reservations have been
 * made then this algorithm will eventually find all the space it needs.
 */
#define XFS_TRANS_LOWMODE		(1u << 8)

/* Transaction has locked the rtbitmap and rtsum inodes */
#define XFS_TRANS_RTBITMAP_LOCKED	(1u << 9)

/*
 * Field values for xfs_trans_mod_sb.
 */
#define XFS_TRANS_SB_ICOUNT		0x00000001
#define XFS_TRANS_SB_IFREE		0x00000002
#define XFS_TRANS_SB_FDBLOCKS		0x00000004
#define XFS_TRANS_SB_RES_FDBLOCKS	0x00000008
#define XFS_TRANS_SB_FREXTENTS		0x00000010
#define XFS_TRANS_SB_RES_FREXTENTS	0x00000020
#define XFS_TRANS_SB_DBLOCKS		0x00000040
#define XFS_TRANS_SB_AGCOUNT		0x00000080
#define XFS_TRANS_SB_IMAXPCT		0x00000100
#define XFS_TRANS_SB_REXTSIZE		0x00000200
#define XFS_TRANS_SB_RBMBLOCKS		0x00000400
#define XFS_TRANS_SB_RBLOCKS		0x00000800
#define XFS_TRANS_SB_REXTENTS		0x00001000
#define XFS_TRANS_SB_REXTSLOG		0x00002000
#define XFS_TRANS_SB_RGCOUNT		0x00004000

/*
 * Here we centralize the specification of XFS meta-data buffer reference count
 * values. This determines how hard the buffer cache tries to hold onto the
 * buffer.
 */
#define XFS_AGF_REF		4
#define XFS_AGI_REF		4
#define XFS_AGFL_REF		3
#define XFS_INO_BTREE_REF	3
#define XFS_ALLOC_BTREE_REF	2
#define XFS_BMAP_BTREE_REF	2
#define XFS_RMAP_BTREE_REF	2
#define XFS_DIR_BTREE_REF	2
#define XFS_INO_REF		2
#define XFS_ATTR_BTREE_REF	1
#define XFS_DQUOT_REF		1
#define XFS_REFC_BTREE_REF	1
#define XFS_SSB_REF		0

/* Computed inode geometry for the filesystem. */
struct xfs_ino_geometry {
	/* Maximum inode count in this filesystem. */
	uint64_t	maxicount;

	/* Actual inode cluster buffer size, in bytes. */
	unsigned int	inode_cluster_size;

	/*
	 * Desired inode cluster buffer size, in bytes.  This value is not
	 * rounded up to at least one filesystem block, which is necessary for
	 * the sole purpose of validating sb_spino_align.  Runtime code must
	 * only ever use inode_cluster_size.
	 */
	unsigned int	inode_cluster_size_raw;

	/* Inode cluster sizes, adjusted to be at least 1 fsb. */
	unsigned int	inodes_per_cluster;
	unsigned int	blocks_per_cluster;

	/* Inode cluster alignment. */
	unsigned int	cluster_align;
	unsigned int	cluster_align_inodes;
	unsigned int	inoalign_mask;	/* mask sb_inoalignmt if used */

	unsigned int	inobt_mxr[2];	/* max inobt btree records */
	unsigned int	inobt_mnr[2];	/* min inobt btree records */
	unsigned int	inobt_maxlevels; /* max inobt btree levels. */

	/* Size of inode allocations under normal operation. */
	unsigned int	ialloc_inos;
	unsigned int	ialloc_blks;

	/* Minimum inode blocks for a sparse allocation. */
	unsigned int	ialloc_min_blks;

	/* stripe unit inode alignment */
	unsigned int	ialloc_align;

	unsigned int	agino_log;	/* #bits for agino in inum */

	/* precomputed default inode attribute fork offset */
	unsigned int	attr_fork_offset;

	/* precomputed value for di_flags2 */
	uint64_t	new_diflags2;

	/* minimum folio order of a page cache allocation */
	unsigned int	min_folio_order;

};

#endif /* __XFS_SHARED_H__ */