/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SHMEM_FS_H
#define __SHMEM_FS_H

#include <linux/file.h>
#include <linux/swap.h>
#include <linux/mempolicy.h>
#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
#include <linux/fs_parser.h>
#include <linux/userfaultfd_k.h>
#include <linux/bits.h>

struct swap_iocb;

/* inode in-kernel data */

#ifdef CONFIG_TMPFS_QUOTA
#define SHMEM_MAXQUOTAS 2
#endif

/* Suppress pre-accounting of the entire object size. */
#define SHMEM_F_NORESERVE	BIT(0)
/* Disallow swapping. */
#define SHMEM_F_LOCKED		BIT(1)
/*
 * Disallow growing, shrinking, or hole punching in the inode. Combined with
 * folio pinning, makes sure the inode's mapping stays fixed.
 *
 * In some ways similar to F_SEAL_GROW | F_SEAL_SHRINK, but can be removed and
 * isn't directly visible to userspace.
 */
#define SHMEM_F_MAPPING_FROZEN	BIT(2)

struct shmem_inode_info {
	spinlock_t		lock;
	unsigned int		seals;		/* shmem seals */
	unsigned long		flags;
	unsigned long		alloced;	/* data pages alloced to file */
	unsigned long		swapped;	/* subtotal assigned to swap */
	union {
		struct offset_ctx	dir_offsets;	/* stable directory offsets */
		struct {
			struct list_head	shrinklist;	/* shrinkable hpage inodes */
			struct list_head	swaplist;	/* chain of maybes on swap */
		};
	};
	struct timespec64	i_crtime;	/* file creation time */
	struct shared_policy	policy;		/* NUMA memory alloc policy */
	struct simple_xattrs	xattrs;		/* list of xattrs */
	pgoff_t			fallocend;	/* highest fallocate endindex */
	unsigned int		fsflags;	/* for FS_IOC_[SG]ETFLAGS */
	atomic_t		stop_eviction;	/* hold when working on inode */
#ifdef CONFIG_TMPFS_QUOTA
	struct dquot __rcu	*i_dquot[MAXQUOTAS];
#endif
	struct inode		vfs_inode;
};

#define SHMEM_FL_USER_VISIBLE		(FS_FL_USER_VISIBLE | FS_CASEFOLD_FL)
#define SHMEM_FL_USER_MODIFIABLE \
	(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
#define SHMEM_FL_INHERITED		(FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)

struct shmem_quota_limits {
	qsize_t usrquota_bhardlimit;	/* Default user quota block hard limit */
	qsize_t usrquota_ihardlimit;	/* Default user quota inode hard limit */
	qsize_t grpquota_bhardlimit;	/* Default group quota block hard limit */
	qsize_t grpquota_ihardlimit;	/* Default group quota inode hard limit */
};

struct shmem_sb_info {
	unsigned long max_blocks;	/* How many blocks are allowed */
	struct percpu_counter used_blocks;	/* How many are allocated */
	unsigned long max_inodes;	/* How many inodes are allowed */
	unsigned long free_ispace;	/* How much ispace left for allocation */
	raw_spinlock_t stat_lock;	/* Serialize shmem_sb_info changes */
	umode_t mode;			/* Mount mode for root directory */
	unsigned char huge;		/* Whether to try for hugepages */
	kuid_t uid;			/* Mount uid for root directory */
	kgid_t gid;			/* Mount gid for root directory */
	bool full_inums;		/* If i_ino should be uint or ino_t */
	bool noswap;			/* ignores VM reclaim / swap requests */
	ino_t next_ino;			/* The next per-sb inode number to use */
	ino_t __percpu *ino_batch;	/* The next per-cpu inode number to use */
	struct mempolicy *mpol;		/* default memory policy for mappings */
	spinlock_t shrinklist_lock;	/* Protects shrinklist */
	struct list_head shrinklist;	/* List of shrinkable inodes */
	unsigned long shrinklist_len;	/* Length of shrinklist */
	struct shmem_quota_limits qlimits; /* Default quota limits */
};

static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
{
	return container_of(inode, struct shmem_inode_info, vfs_inode);
}
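
/*
 * Illustrative sketch (not taken from mm/shmem.c): code that holds a struct
 * inode known to be shmem-backed can recover the shmem-private state with
 * SHMEM_I().  The helper name below is hypothetical.
 *
 *	static unsigned long example_shmem_swapped_pages(struct inode *inode)
 *	{
 *		struct shmem_inode_info *info = SHMEM_I(inode);
 *
 *		return READ_ONCE(info->swapped);
 *	}
 */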

/*
 * Functions in mm/shmem.c called directly from elsewhere:
 */
extern const struct fs_parameter_spec shmem_fs_parameters[];
extern void shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
struct file *shmem_file_setup(const char *name, loff_t size, vma_flags_t flags);
struct file *shmem_kernel_file_setup(const char *name, loff_t size, vma_flags_t vma_flags);
extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
		const char *name, loff_t size, vma_flags_t flags);
int shmem_zero_setup(struct vm_area_struct *vma);
int shmem_zero_setup_desc(struct vm_area_desc *desc);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
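
/*
 * Illustrative sketch, not a declaration: a typical in-kernel user creates an
 * unlinked tmpfs-backed file with shmem_file_setup() and drops it with fput()
 * when done.  The object name, size and the zero flags argument below are
 * placeholders, not values this header mandates.
 *
 *	struct file *filp = shmem_file_setup("example-obj", size, 0);
 *
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 */
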
#ifdef CONFIG_SHMEM
bool shmem_mapping(const struct address_space *mapping);
#else
static inline bool shmem_mapping(const struct address_space *mapping)
{
	return false;
}
#endif /* CONFIG_SHMEM */
void shmem_unlock_mapping(struct address_space *mapping);
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
		struct list_head *folio_list);
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long shmem_allowable_huge_orders(struct inode *inode,
				struct vm_area_struct *vma, pgoff_t index,
				loff_t write_end, bool shmem_huge_force);
bool shmem_hpage_pmd_enabled(void);
#else
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
				struct vm_area_struct *vma, pgoff_t index,
				loff_t write_end, bool shmem_huge_force)
{
	return 0;
}

static inline bool shmem_hpage_pmd_enabled(void)
{
	return false;
}
#endif

#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern void shmem_uncharge(struct inode *inode, long pages);
#else
static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	return 0;
}

static inline void shmem_uncharge(struct inode *inode, long pages)
{
}
#endif
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end);

/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_NOALLOC,	/* similar, but fail on hole or use fallocated page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
		struct folio **foliop, enum sgp_type sgp);
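
/*
 * Illustrative sketch of the shmem_get_folio() calling convention for the
 * common SGP_CACHE case: on success the folio comes back locked and uptodate
 * (with SGP_READ a hole may legitimately leave *foliop NULL), so callers
 * normally unlock it before use and put it when finished.
 *
 *	struct folio *folio = NULL;
 *	int err;
 *
 *	err = shmem_get_folio(inode, index, 0, &folio, SGP_CACHE);
 *	if (err)
 *		return err;
 *	folio_unlock(folio);
 *	...
 *	folio_put(folio);
 */
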
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp);

static inline struct folio *shmem_read_folio(struct address_space *mapping,
		pgoff_t index)
{
	return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
}

static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
{
	return shmem_read_mapping_page_gfp(mapping, index,
					mapping_gfp_mask(mapping));
}
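
/*
 * Illustrative sketch: drivers that keep data in a shmem mapping commonly
 * pull individual pages with shmem_read_mapping_page(); the page is returned
 * uptodate with a reference held, and errors are reported via ERR_PTR().
 *
 *	struct page *page = shmem_read_mapping_page(mapping, index);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);
 */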

static inline bool shmem_file(struct file *file)
{
	if (!IS_ENABLED(CONFIG_SHMEM))
		return false;
	if (!file || !file->f_mapping)
		return false;
	return shmem_mapping(file->f_mapping);
}
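
/*
 * Illustrative sketch: shmem_file() is the usual way for callers to ask
 * whether a VMA's backing file is shmem/tmpfs before applying shmem-specific
 * handling.  The surrounding code is hypothetical.
 *
 *	if (shmem_file(vma->vm_file))
 *		... apply tmpfs-specific handling ...
 */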

/* Must be called with the inode lock held exclusive. */
static inline void shmem_freeze(struct inode *inode, bool freeze)
{
	if (freeze)
		SHMEM_I(inode)->flags |= SHMEM_F_MAPPING_FROZEN;
	else
		SHMEM_I(inode)->flags &= ~SHMEM_F_MAPPING_FROZEN;
}
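
/*
 * Illustrative sketch of toggling SHMEM_F_MAPPING_FROZEN, holding the inode
 * lock across each call as required by the comment above:
 *
 *	inode_lock(inode);
 *	shmem_freeze(inode, true);
 *	inode_unlock(inode);
 *
 *	... pin folios / rely on the mapping staying fixed ...
 *
 *	inode_lock(inode);
 *	shmem_freeze(inode, false);
 *	inode_unlock(inode);
 */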

/*
 * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
 * beyond i_size's notion of EOF, which fallocate has committed to reserving:
 * which split_huge_page() must therefore not delete. This use of a single
 * "fallocend" per inode errs on the side of not deleting a reservation when
 * in doubt: there are plenty of cases when it preserves unreserved pages.
 */
static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
{
	return max(eof, SHMEM_I(inode)->fallocend);
}
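
/*
 * Illustrative sketch of the intended use: when truncating or splitting a
 * large folio, the safe end-of-file bound is the fallocated end rather than
 * bare i_size, e.g.:
 *
 *	end = shmem_fallocend(inode,
 *			      DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
 */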

extern bool shmem_charge(struct inode *inode, long pages);

#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
				  struct vm_area_struct *dst_vma,
				  unsigned long dst_addr,
				  unsigned long src_addr,
				  uffd_flags_t flags,
				  struct folio **foliop);
#else /* !CONFIG_SHMEM */
#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
			       src_addr, flags, foliop) ({ BUG(); 0; })
#endif /* CONFIG_SHMEM */
#endif /* CONFIG_USERFAULTFD */

/*
 * Used space is stored as an unsigned 64-bit value in bytes, but the
 * quota core supports only signed 64-bit values, so use that as the limit.
 */
#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL

#ifdef CONFIG_TMPFS_QUOTA
extern const struct dquot_operations shmem_quota_operations;
extern struct quota_format_type shmem_quota_format;
#endif /* CONFIG_TMPFS_QUOTA */

#endif