/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
#include "swap.h"

static struct vfsmount *shm_mnt __ro_after_init;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/quotaops.h>
#include <linux/rcupdate_wait.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
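/*
 * For illustration (assuming PAGE_SIZE == 4096, PAGE_SHIFT == 12):
 * VM_ACCT(5000) == PAGE_ALIGN(5000) >> 12 == 8192 >> 12 == 2 pages,
 * i.e. object sizes are rounded up to whole pages before accounting.
 */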

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Pretend that one inode + its dentry occupy this much memory */
#define BOGO_INODE_SIZE 1024
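/*
 * For illustration of the "bogo" accounting: a directory's i_size is
 * reported as if each entry took BOGO_DIRENT_SIZE bytes, and each inode
 * reserves BOGO_INODE_SIZE bytes of free_ispace, so a mount with e.g.
 * nr_inodes=100 starts with roughly 100 * 1024 bytes of inode space.
 */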

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
	unsigned short quota_types;
	struct shmem_quota_limits qlimits;
#if IS_ENABLED(CONFIG_UNICODE)
	struct unicode_map *encoding;
	bool strict_encoding;
#endif
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
};
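/*
 * For illustration: the SHMEM_SEEN_* bits record which mount options were
 * actually parsed, e.g. handling "size=" sets ctx->seen |= SHMEM_SEEN_BLOCKS
 * in the fs_context parser elsewhere in this file, so that a later remount
 * only applies the limits the user explicitly specified.
 */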

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static unsigned long huge_shmem_orders_always __read_mostly;
static unsigned long huge_shmem_orders_madvise __read_mostly;
static unsigned long huge_shmem_orders_inherit __read_mostly;
static unsigned long huge_shmem_orders_within_size __read_mostly;
static bool shmem_orders_configured __initdata;
#endif

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
			ULONG_MAX / BOGO_INODE_SIZE);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
			struct vm_area_struct *vma, vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
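/*
 * For illustration: growing a 1-page object to 3 pages charges only the
 * 2-page difference via security_vm_enough_memory_mm(); shrinking it back
 * returns those 2 pages with vm_unacct_memory(). Equal page counts (e.g.
 * resizing within the same final page) charge nothing.
 */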

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static int shmem_inode_acct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int err = -ENOSPC;

	if (shmem_acct_blocks(info->flags, pages))
		return err;

	might_sleep();	/* when quotas */
	if (sbinfo->max_blocks) {
		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
						sbinfo->max_blocks, pages))
			goto unacct;

		err = dquot_alloc_block_nodirty(inode, pages);
		if (err) {
			percpu_counter_sub(&sbinfo->used_blocks, pages);
			goto unacct;
		}
	} else {
		err = dquot_alloc_block_nodirty(inode, pages);
		if (err)
			goto unacct;
	}

	return 0;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return err;
}
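/*
 * Note the ordering above: the VM accounting charge is taken first, then
 * the per-sb used_blocks limit (when max_blocks is set), then the quota
 * charge; any later failure unwinds the earlier charges before the error
 * is returned (-ENOSPC for the used_blocks limit, the quota error otherwise).
 */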

static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	might_sleep();	/* when quotas */
	dquot_free_block_nodirty(inode, pages);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->a_ops == &shmem_aops;
}
EXPORT_SYMBOL_GPL(shmem_mapping);

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

#ifdef CONFIG_TMPFS_QUOTA

static int shmem_enable_quotas(struct super_block *sb,
			       unsigned short quota_types)
{
	int type, err = 0;

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
		if (!(quota_types & (1 << type)))
			continue;
		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
					  DQUOT_USAGE_ENABLED |
					  DQUOT_LIMITS_ENABLED);
		if (err)
			goto out_err;
	}
	return 0;

out_err:
	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
		type, err);
	for (type--; type >= 0; type--)
		dquot_quota_off(sb, type);
	return err;
}

static void shmem_disable_quotas(struct super_block *sb)
{
	int type;

	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
		dquot_quota_off(sb, type);
}

static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
{
	return SHMEM_I(inode)->i_dquot;
}
#endif /* CONFIG_TMPFS_QUOTA */

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_ispace -= BOGO_INODE_SIZE;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
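/*
 * For illustration: with SHMEM_INO_BATCH == 1024, a CPU whose per-cpu
 * cursor hits a multiple of 1024 takes stat_lock once to claim the global
 * range [next_ino, next_ino + 1024), then hands out the remaining 1023
 * numbers locklessly from *next_ino on subsequent calls.
 */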

static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 * @alloced: the change in number of pages allocated to inode
 * @swapped: the change in number of pages swapped from inode
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 */
static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	spin_lock(&info->lock);
	info->alloced += alloced;
	info->swapped += swapped;
	freed = info->alloced - info->swapped -
		READ_ONCE(inode->i_mapping->nrpages);
	/*
	 * Special case: whereas normally shmem_recalc_inode() is called
	 * after i_mapping->nrpages has already been adjusted (up or down),
	 * shmem_writepage() has to raise swapped before nrpages is lowered -
	 * to stop a racing shmem_recalc_inode() from thinking that a page has
	 * been freed.  Compensate here, to avoid the need for a followup call.
	 */
	if (swapped > 0)
		freed += swapped;
	if (freed > 0)
		info->alloced -= freed;
	spin_unlock(&info->lock);

	/* The quota case may block */
	if (freed > 0)
		shmem_inode_unacct_blocks(inode, freed);
}
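/*
 * Worked example: if the mm reclaimed 8 undirtied hole pages behind our
 * back, a shmem_recalc_inode(inode, 0, 0) call finds
 * alloced - swapped - nrpages == 8, so freed == 8: alloced is lowered by 8
 * and those 8 blocks are unaccounted from the sb limit and the quota.
 */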

bool shmem_charge(struct inode *inode, long pages)
{
	struct address_space *mapping = inode->i_mapping;

	if (shmem_inode_acct_blocks(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	xa_lock_irq(&mapping->i_pages);
	mapping->nrpages += pages;
	xa_unlock_irq(&mapping->i_pages);

	shmem_recalc_inode(inode, pages, 0);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	/* pages argument is currently unused: keep it to help debugging */
	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	shmem_recalc_inode(inode, 0, 0);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking folio is not enough: by the time a swapcache folio is locked, it
 * might be reused, and again be swapcache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
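/*
 * For illustration, the special values are reached like this:
 *   echo deny  > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *   echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 * whereas the per-mount policy comes from e.g. "mount -o huge=within_size".
 */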

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				      loff_t write_end, bool shmem_huge_force,
				      unsigned long vm_flags)
{
	loff_t i_size;

	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;
	if (!S_ISREG(inode->i_mode))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = max(write_end, i_size_read(inode));
		i_size = round_up(i_size, PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (vm_flags & VM_HUGEPAGE)
			return true;
		fallthrough;
	default:
		return false;
	}
}
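/*
 * Worked example for SHMEM_HUGE_WITHIN_SIZE above (4K pages, 2M PMD): a
 * fault at index 3 rounds up to HPAGE_PMD_NR == 512, so a huge page is
 * used only if i_size (or the pending write end) reaches 512 pages, i.e.
 * the file already extends across the whole 2M extent.
 */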

static int shmem_parse_huge(const char *str)
{
	int huge;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "never"))
		huge = SHMEM_HUGE_NEVER;
	else if (!strcmp(str, "always"))
		huge = SHMEM_HUGE_ALWAYS;
	else if (!strcmp(str, "within_size"))
		huge = SHMEM_HUGE_WITHIN_SIZE;
	else if (!strcmp(str, "advise"))
		huge = SHMEM_HUGE_ADVISE;
	else if (!strcmp(str, "deny"))
		huge = SHMEM_HUGE_DENY;
	else if (!strcmp(str, "force"))
		huge = SHMEM_HUGE_FORCE;
	else
		return -EINVAL;

	if (!has_transparent_hugepage() &&
	    huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	/* Do not override huge allocation policy with non-PMD sized mTHP */
	if (huge == SHMEM_HUGE_FORCE &&
	    huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
		return -EINVAL;

	return huge;
}

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_free)
{
	LIST_HEAD(list), *pos, *next;
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	unsigned long split = 0, freed = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &list) {
		pgoff_t next, end;
		loff_t i_size;
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_free && freed >= nr_to_free)
			goto move_back;

		i_size = i_size_read(inode);
		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
		if (!folio || xa_is_value(folio))
			goto drop;

		/* No large folio at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/* Check if there is anything to gain from splitting */
		next = folio_next_index(folio);
		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
		if (end <= folio->index || end >= next) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		freed += next - end;
		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_free)
{
	return 0;
}

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				      loff_t write_end, bool shmem_huge_force,
				      unsigned long vm_flags)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void shmem_update_stats(struct folio *folio, int nr_pages)
{
	if (folio_test_pmd_mappable(folio))
		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
}

/*
 * Somewhat like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	gfp &= GFP_RECLAIM_MASK;
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		shmem_update_stats(folio, nr);
		mapping->nrpages += nr;
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		folio->mapping = NULL;
		folio_ref_sub(folio, nr);
		return xas_error(&xas);
	}

	return 0;
}

/*
 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	shmem_update_stats(folio, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put_refs(folio, nr);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache. Returns
 * the number of pages being freed. 0 means entry not found in XArray (0 pages
 * being freed).
 */
static long shmem_free_swap(struct address_space *mapping,
			    pgoff_t index, void *radswap)
{
	int order = xa_get_order(&mapping->i_pages, index);
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return 0;
	free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);

	return 1 << order;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;
	unsigned long max = end - 1;

	rcu_read_lock();
	xas_for_each(&xas, page, max) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped += 1 << xas_get_order(&xas);
		if (xas.xa_index == max)
			break;
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
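/*
 * For illustration: a single PMD-sized swap entry found by the walk above
 * contributes 1 << xas_get_order(&xas) == 512 pages (with 4K pages); the
 * page count is converted to bytes only by the final << PAGE_SHIFT.
 */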

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated folios as holes.
	 */
	folio = filemap_get_entry(inode->i_mapping, index);
	if (!folio)
		return folio;
	if (!xa_is_value(folio)) {
		folio_lock(folio);
		if (folio->mapping == inode->i_mapping)
			return folio;
		/* The folio has been swapped out */
		folio_unlock(folio);
		folio_put(folio);
	}
	/*
	 * But read a folio back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, 0, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += shmem_free_swap(mapping,
							indices[i], folio);
				continue;
			}

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	/*
	 * When undoing a failed fallocate, we want none of the partial folio
	 * zeroing and splitting below, but shall want to truncate the whole
	 * folio when !uptodate indicates that it was added by this fallocate,
	 * even when [lstart, lend] covers only a part of the folio.
	 */
	if (unfalloc)
		goto whole_folios;

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio_next_index(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

whole_folios:

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				long swaps_freed;

				if (unfalloc)
					continue;
				swaps_freed = shmem_free_swap(mapping, indices[i], folio);
				if (!swaps_freed) {
					/* Swap was replaced by page: retry */
					index = indices[i];
					break;
				}
				nr_swaps_freed += swaps_freed;
				continue;
			}

			folio_lock(folio);

			if (!unfalloc || !folio_test_uptodate(folio)) {
				if (folio_mapping(folio) != mapping) {
					/* Page was replaced by swap: retry */
					folio_unlock(folio);
					index = indices[i];
					break;
				}
				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
						folio);

				if (!folio_test_large(folio)) {
					truncate_inode_folio(mapping, folio);
				} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
					/*
					 * If we split a page, reset the loop so
					 * that we pick up the new sub pages.
					 * Otherwise the THP was entirely
					 * dropped or the target range was
					 * zeroed, so just continue the loop as
					 * is.
					 */
					if (!folio_test_large(folio)) {
						folio_unlock(folio);
						index = start;
						break;
					}
				}
			}
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
	}

	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages)
		shmem_recalc_inode(inode, 0, 0);

	if (info->fsflags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (info->fsflags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (info->fsflags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= (STATX_ATTR_APPEND |
			STATX_ATTR_IMMUTABLE |
			STATX_ATTR_NODUMP);
	generic_fillattr(idmap, request_mask, inode, stat);

	if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
		stat->blksize = HPAGE_PMD_SIZE;

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = info->i_crtime.tv_sec;
		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
	}

	return 0;
}

static int shmem_setattr(struct mnt_idmap *idmap,
			 struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int error;
	bool update_mtime = false;
	bool update_ctime = true;

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
		if ((inode->i_mode ^ attr->ia_mode) & 0111) {
			return -EPERM;
		}
	}

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			update_mtime = true;
		} else {
			update_ctime = false;
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
		}
	}

	if (is_quota_modification(idmap, inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}

	/* Transfer quota accounting */
	if (i_uid_needs_update(idmap, attr, inode) ||
	    i_gid_needs_update(idmap, attr, inode)) {
		error = dquot_transfer(idmap, inode, attr);
		if (error)
			return error;
	}

	setattr_copy(idmap, inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
	if (!error && update_ctime) {
		inode_set_ctime_current(inode);
		if (update_mtime)
			inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
		inode_inc_iversion(inode);
	}
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	size_t freed = 0;

	if (shmem_mapping(inode->i_mapping)) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		mapping_set_exiting(inode->i_mapping);
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		while (!list_empty(&info->swaplist)) {
			/* Wait while shmem_unuse() is scanning this inode... */
			wait_var_event(&info->stop_eviction,
				       !atomic_read(&info->stop_eviction));
			mutex_lock(&shmem_swaplist_mutex);
			/* ...but beware of the race if we peeked too early */
			if (!atomic_read(&info->stop_eviction))
				list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
	shmem_free_inode(inode->i_sb, freed);
	WARN_ON(inode->i_blocks);
	clear_inode(inode);
#ifdef CONFIG_TMPFS_QUOTA
	dquot_free_inode(inode);
	dquot_drop(inode);
#endif
}

static int shmem_find_swap_entries(struct address_space *mapping,
				   pgoff_t start, struct folio_batch *fbatch,
				   pgoff_t *indices, unsigned int type)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;
	swp_entry_t entry;

	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (xas_retry(&xas, folio))
			continue;

		if (!xa_is_value(folio))
			continue;

		entry = radix_to_swp_entry(folio);
		/*
		 * swapin error entries can be found in the mapping. But they're
		 * deliberately ignored here as we've done everything we can do.
		 */
		if (swp_type(entry) != type)
			continue;

		indices[folio_batch_count(fbatch)] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return xas.xa_index;
}

/*
 * Move the swapped pages for an inode to page cache. Returns the count
 * of pages swapped in, or the error in case of failure.
 */
static int shmem_unuse_swap_entries(struct inode *inode,
		struct folio_batch *fbatch, pgoff_t *indices)
{
	int i = 0;
	int ret = 0;
	int error = 0;
	struct address_space *mapping = inode->i_mapping;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (!xa_is_value(folio))
			continue;
		error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
					mapping_gfp_mask(mapping), NULL, NULL);
		if (error == 0) {
			folio_unlock(folio);
			folio_put(folio);
			ret++;
		}
		if (error == -ENOMEM)
			break;
		error = 0;
	}
	return error ? error : ret;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start = 0;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	int ret = 0;

	do {
		folio_batch_init(&fbatch);
		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
		if (folio_batch_count(&fbatch) == 0) {
			ret = 0;
			break;
		}

		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
		if (ret < 0)
			break;

		start = indices[folio_batch_count(&fbatch) - 1];
	} while (true);

	return ret;
}

/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
 */
int shmem_unuse(unsigned int type)
{
	struct shmem_inode_info *info, *next;
	int error = 0;

	if (list_empty(&shmem_swaplist))
		return 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
		if (!info->swapped) {
			list_del_init(&info->swaplist);
			continue;
		}
		/*
		 * Drop the swaplist mutex while searching the inode for swap;
		 * but before doing so, make sure shmem_evict_inode() will not
		 * remove placeholder inode from swaplist, nor let it be freed
		 * (igrab() would protect from unlink, but not from unmount).
		 */
		atomic_inc(&info->stop_eviction);
		mutex_unlock(&shmem_swaplist_mutex);

		error = shmem_unuse_inode(&info->vfs_inode, type);
		cond_resched();

		mutex_lock(&shmem_swaplist_mutex);
		next = list_next_entry(info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		if (atomic_dec_and_test(&info->stop_eviction))
			wake_up_var(&info->stop_eviction);
		if (error)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	swp_entry_t swap;
	pgoff_t index;
	int nr_pages;
	bool split = false;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (WARN_ON_ONCE(!wbc->for_reclaim))
		goto redirty;

	if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
		goto redirty;

	if (!total_swap_pages)
		goto redirty;

	/*
	 * If CONFIG_THP_SWAP is not enabled, the large folio should be
	 * split when swapping.
	 *
	 * And shrinkage of pages beyond i_size does not split swap, so
	 * swapout of a large folio crossing i_size needs to split too
	 * (unless fallocate has been used to preallocate beyond EOF).
	 */
	if (folio_test_large(folio)) {
		index = shmem_fallocend(inode,
			DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
		if ((index > folio->index && index < folio_next_index(folio)) ||
		    !IS_ENABLED(CONFIG_THP_SWAP))
			split = true;
	}

	if (split) {
try_split:
		/* Ensure the subpages are still dirty */
		folio_test_set_dirty(folio);
		if (split_huge_page_to_list_to_order(page, wbc->list, 0))
			goto redirty;
		folio = page_folio(page);
		folio_clear_dirty(folio);
	}

	index = folio->index;
	nr_pages = folio_nr_pages(folio);

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated folio arriving here is now to initialize it and write it.
	 *
	 * That's okay for a folio already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this folio in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the folio, and let shmem_fallocate() quit when too many.
	 */
	if (!folio_test_uptodate(folio)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped += nr_pages;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		folio_mark_uptodate(folio);
	}

	swap = folio_alloc_swap(folio);
	if (!swap.val) {
		if (nr_pages > 1)
			goto try_split;

		goto redirty;
	}

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the folio is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(folio, swap,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			NULL) == 0) {
		shmem_recalc_inode(inode, 0, nr_pages);
		swap_shmem_alloc(swap, nr_pages);
		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(folio_mapped(folio));
		return swap_writepage(&folio->page, wbc);
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_folio(folio, swap);
redirty:
	folio_mark_dirty(folio);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
	folio_unlock(folio);
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		raw_spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */

static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
			pgoff_t index, unsigned int order, pgoff_t *ilx);

static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
	mpol_cond_put(mpol);

	return folio;
}

/*
 * Make sure huge_gfp is always more limited than limit_gfp.
 * Some of the flags set permissions, while others set limitations.
 */
static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
{
	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);

	/* Allow allocations only from the originally specified zones. */
	result |= zoneflags;

	/*
	 * Minimize the result gfp by taking the union with the deny flags,
	 * and the intersection of the allow flags.
	 */
	result |= (limit_gfp & denyflags);
	result |= (huge_gfp & limit_gfp) & allowflags;

	return result;
}
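/*
 * For illustration: combining a huge_gfp that includes __GFP_FS with a
 * limit_gfp of GFP_NOFS drops __GFP_FS from the allow set, while any
 * __GFP_NOWARN/__GFP_NORETRY in the limit mask is inherited; the huge
 * allocation can thus never be more permissive than the original request.
 */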

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool shmem_hpage_pmd_enabled(void)
{
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
		return true;
	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
		return true;
	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
		return true;
	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
	    shmem_huge != SHMEM_HUGE_NEVER)
		return true;

	return false;
}

unsigned long shmem_allowable_huge_orders(struct inode *inode,
				struct vm_area_struct *vma, pgoff_t index,
				loff_t write_end, bool shmem_huge_force)
{
	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
	unsigned long vm_flags = vma ? vma->vm_flags : 0;
	pgoff_t aligned_index;
	bool global_huge;
	loff_t i_size;
	int order;

	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
		return 0;

	global_huge = shmem_huge_global_enabled(inode, index, write_end,
						shmem_huge_force, vm_flags);
	if (!vma || !vma_is_anon_shmem(vma)) {
		/*
		 * For tmpfs, we now only support PMD sized THP if huge page
		 * is enabled, otherwise fallback to order 0.
		 */
		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
	}

	/*
	 * Following the 'deny' semantics of the top level, force the huge
	 * option off from all mounts.
	 */
	if (shmem_huge == SHMEM_HUGE_DENY)
		return 0;

	/*
	 * Only allow inherit orders if the top-level value is 'force', which
	 * means non-PMD sized THP can not override 'huge' mount option now.
	 */
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return READ_ONCE(huge_shmem_orders_inherit);

	/* Allow mTHP that will be fully within i_size. */
	order = highest_order(within_size_orders);
	while (within_size_orders) {
		aligned_index = round_up(index + 1, 1 << order);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= aligned_index) {
			mask |= within_size_orders;
			break;
		}

		order = next_order(&within_size_orders, order);
	}

	if (vm_flags & VM_HUGEPAGE)
		mask |= READ_ONCE(huge_shmem_orders_madvise);

	if (global_huge)
		mask |= READ_ONCE(huge_shmem_orders_inherit);

	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
}
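/*
 * For illustration: the returned value is a bitmask of allowable folio
 * orders, e.g. BIT(HPAGE_PMD_ORDER) for a classic 2M THP on x86-64 with 4K
 * pages (order 9), possibly combined with smaller mTHP orders enabled via
 * the per-order sysfs knobs.
 */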
1745
shmem_suitable_orders(struct inode * inode,struct vm_fault * vmf,struct address_space * mapping,pgoff_t index,unsigned long orders)1746 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1747 struct address_space *mapping, pgoff_t index,
1748 unsigned long orders)
1749 {
1750 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1751 pgoff_t aligned_index;
1752 unsigned long pages;
1753 int order;
1754
1755 if (vma) {
1756 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1757 if (!orders)
1758 return 0;
1759 }
1760
1761 /* Find the highest order that can add into the page cache */
1762 order = highest_order(orders);
1763 while (orders) {
1764 pages = 1UL << order;
1765 aligned_index = round_down(index, pages);
1766 /*
1767 * Check for conflict before waiting on a huge allocation.
1768 * Conflict might be that a huge page has just been allocated
1769 * and added to page cache by a racing thread, or that there
1770 * is already at least one small page in the huge extent.
1771 * Be careful to retry when appropriate, but not forever!
1772 * Elsewhere -EEXIST would be the right code, but not here.
1773 */
1774 if (!xa_find(&mapping->i_pages, &aligned_index,
1775 aligned_index + pages - 1, XA_PRESENT))
1776 break;
1777 order = next_order(&orders, order);
1778 }
1779
1780 return orders;
1781 }
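/*
 * Sketch of the conflict check above (illustrative): for index == 35
 * and order == 4, aligned_index == 32 and xa_find() scans page cache
 * slots 32..47; any present entry there (a huge folio added by a racing
 * thread, or a single small page inside the extent) disqualifies
 * order 4, and the next lower order is tried instead.
 */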
1782 #else
1783 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1784 struct address_space *mapping, pgoff_t index,
1785 unsigned long orders)
1786 {
1787 return 0;
1788 }
1789 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1790
1791 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1792 struct shmem_inode_info *info, pgoff_t index)
1793 {
1794 struct mempolicy *mpol;
1795 pgoff_t ilx;
1796 struct folio *folio;
1797
1798 mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1799 folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1800 mpol_cond_put(mpol);
1801
1802 return folio;
1803 }
1804
1805 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1806 gfp_t gfp, struct inode *inode, pgoff_t index,
1807 struct mm_struct *fault_mm, unsigned long orders)
1808 {
1809 struct address_space *mapping = inode->i_mapping;
1810 struct shmem_inode_info *info = SHMEM_I(inode);
1811 unsigned long suitable_orders = 0;
1812 struct folio *folio = NULL;
1813 long pages;
1814 int error, order;
1815
1816 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1817 orders = 0;
1818
1819 if (orders > 0) {
1820 suitable_orders = shmem_suitable_orders(inode, vmf,
1821 mapping, index, orders);
1822
1823 order = highest_order(suitable_orders);
1824 while (suitable_orders) {
1825 pages = 1UL << order;
1826 index = round_down(index, pages);
1827 folio = shmem_alloc_folio(gfp, order, info, index);
1828 if (folio)
1829 goto allocated;
1830
1831 if (pages == HPAGE_PMD_NR)
1832 count_vm_event(THP_FILE_FALLBACK);
1833 count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1834 order = next_order(&suitable_orders, order);
1835 }
1836 } else {
1837 pages = 1;
1838 folio = shmem_alloc_folio(gfp, 0, info, index);
1839 }
1840 if (!folio)
1841 return ERR_PTR(-ENOMEM);
1842
1843 allocated:
1844 __folio_set_locked(folio);
1845 __folio_set_swapbacked(folio);
1846
1847 gfp &= GFP_RECLAIM_MASK;
1848 error = mem_cgroup_charge(folio, fault_mm, gfp);
1849 if (error) {
1850 if (xa_find(&mapping->i_pages, &index,
1851 index + pages - 1, XA_PRESENT)) {
1852 error = -EEXIST;
1853 } else if (pages > 1) {
1854 if (pages == HPAGE_PMD_NR) {
1855 count_vm_event(THP_FILE_FALLBACK);
1856 count_vm_event(THP_FILE_FALLBACK_CHARGE);
1857 }
1858 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1859 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1860 }
1861 goto unlock;
1862 }
1863
1864 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1865 if (error)
1866 goto unlock;
1867
1868 error = shmem_inode_acct_blocks(inode, pages);
1869 if (error) {
1870 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1871 long freed;
1872 /*
1873 * Try to reclaim some space by splitting a few
1874 * large folios beyond i_size on the filesystem.
1875 */
1876 shmem_unused_huge_shrink(sbinfo, NULL, pages);
1877 /*
1878 * And do a shmem_recalc_inode() to account for freed pages:
1879 * except our folio is there in cache, so not quite balanced.
1880 */
1881 spin_lock(&info->lock);
1882 freed = pages + info->alloced - info->swapped -
1883 READ_ONCE(mapping->nrpages);
1884 if (freed > 0)
1885 info->alloced -= freed;
1886 spin_unlock(&info->lock);
1887 if (freed > 0)
1888 shmem_inode_unacct_blocks(inode, freed);
1889 error = shmem_inode_acct_blocks(inode, pages);
1890 if (error) {
1891 filemap_remove_folio(folio);
1892 goto unlock;
1893 }
1894 }
1895
1896 shmem_recalc_inode(inode, pages, 0);
1897 folio_add_lru(folio);
1898 return folio;
1899
1900 unlock:
1901 folio_unlock(folio);
1902 folio_put(folio);
1903 return ERR_PTR(error);
1904 }
1905
1906 /*
1907 * When a page is moved from swapcache to shmem filecache (either by the
1908 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1909 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1910 * ignorance of the mapping it belongs to. If that mapping has special
1911 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1912 * we may need to copy to a suitable page before moving to filecache.
1913 *
1914 * In a future release, this may well be extended to respect cpuset and
1915 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1916 * but for now it is a simple matter of zone.
1917 */
1918 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1919 {
1920 return folio_zonenum(folio) > gfp_zone(gfp);
1921 }
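/*
 * Example (illustrative): in the gma500 case mentioned above, gfp is
 * constrained to allocations below 4GB, so gfp_zone(gfp) is ZONE_DMA32;
 * a folio that was swapped in to ZONE_NORMAL then has
 * folio_zonenum(folio) > gfp_zone(gfp) and must be copied to a suitable
 * folio before it enters the filecache.
 */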
1922
1923 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1924 struct shmem_inode_info *info, pgoff_t index,
1925 struct vm_area_struct *vma)
1926 {
1927 struct folio *new, *old = *foliop;
1928 swp_entry_t entry = old->swap;
1929 struct address_space *swap_mapping = swap_address_space(entry);
1930 pgoff_t swap_index = swap_cache_index(entry);
1931 XA_STATE(xas, &swap_mapping->i_pages, swap_index);
1932 int nr_pages = folio_nr_pages(old);
1933 int error = 0, i;
1934
1935 /*
1936 * We have arrived here because our zones are constrained, so don't
1937 * limit chance of success by further cpuset and node constraints.
1938 */
1939 gfp &= ~GFP_CONSTRAINT_MASK;
1940 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1941 if (nr_pages > 1) {
1942 gfp_t huge_gfp = vma_thp_gfp_mask(vma);
1943
1944 gfp = limit_gfp_mask(huge_gfp, gfp);
1945 }
1946 #endif
1947
1948 new = shmem_alloc_folio(gfp, folio_order(old), info, index);
1949 if (!new)
1950 return -ENOMEM;
1951
1952 folio_ref_add(new, nr_pages);
1953 folio_copy(new, old);
1954 flush_dcache_folio(new);
1955
1956 __folio_set_locked(new);
1957 __folio_set_swapbacked(new);
1958 folio_mark_uptodate(new);
1959 new->swap = entry;
1960 folio_set_swapcache(new);
1961
1962 /* Swap cache still stores N entries instead of a high-order entry */
1963 xa_lock_irq(&swap_mapping->i_pages);
1964 for (i = 0; i < nr_pages; i++) {
1965 void *item = xas_load(&xas);
1966
1967 if (item != old) {
1968 error = -ENOENT;
1969 break;
1970 }
1971
1972 xas_store(&xas, new);
1973 xas_next(&xas);
1974 }
1975 if (!error) {
1976 mem_cgroup_replace_folio(old, new);
1977 shmem_update_stats(new, nr_pages);
1978 shmem_update_stats(old, -nr_pages);
1979 }
1980 xa_unlock_irq(&swap_mapping->i_pages);
1981
1982 if (unlikely(error)) {
1983 /*
1984 * Is this possible? I think not, now that our callers
1985 * check both the swapcache flag and folio->private
1986 * after getting the folio lock; but be defensive.
1987 * Reverse old to newpage for clear and free.
1988 */
1989 old = new;
1990 } else {
1991 folio_add_lru(new);
1992 *foliop = new;
1993 }
1994
1995 folio_clear_swapcache(old);
1996 old->private = NULL;
1997
1998 folio_unlock(old);
1999 /*
2000 * The old folio has been removed from the swap cache: drop its
2001 * 'nr_pages' references, as well as the one temporary reference
2002 * taken by the swap cache.
2003 */
2004 folio_put_refs(old, nr_pages + 1);
2005 return error;
2006 }
2007
2008 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
2009 struct folio *folio, swp_entry_t swap)
2010 {
2011 struct address_space *mapping = inode->i_mapping;
2012 swp_entry_t swapin_error;
2013 void *old;
2014 int nr_pages;
2015
2016 swapin_error = make_poisoned_swp_entry();
2017 old = xa_cmpxchg_irq(&mapping->i_pages, index,
2018 swp_to_radix_entry(swap),
2019 swp_to_radix_entry(swapin_error), 0);
2020 if (old != swp_to_radix_entry(swap))
2021 return;
2022
2023 nr_pages = folio_nr_pages(folio);
2024 folio_wait_writeback(folio);
2025 delete_from_swap_cache(folio);
2026 /*
2027 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
2028 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
2029 * in shmem_evict_inode().
2030 */
2031 shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2032 swap_free_nr(swap, nr_pages);
2033 }
2034
2035 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2036 swp_entry_t swap, gfp_t gfp)
2037 {
2038 struct address_space *mapping = inode->i_mapping;
2039 XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2040 void *alloced_shadow = NULL;
2041 int alloced_order = 0, i;
2042
2043 /* Convert user data gfp flags to xarray node gfp flags */
2044 gfp &= GFP_RECLAIM_MASK;
2045
2046 for (;;) {
2047 int order = -1, split_order = 0;
2048 void *old = NULL;
2049
2050 xas_lock_irq(&xas);
2051 old = xas_load(&xas);
2052 if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2053 xas_set_err(&xas, -EEXIST);
2054 goto unlock;
2055 }
2056
2057 order = xas_get_order(&xas);
2058
2059 /* Swap entry may have changed before we re-acquire the lock */
2060 if (alloced_order &&
2061 (old != alloced_shadow || order != alloced_order)) {
2062 xas_destroy(&xas);
2063 alloced_order = 0;
2064 }
2065
2066 /* Try to split large swap entry in pagecache */
2067 if (order > 0) {
2068 if (!alloced_order) {
2069 split_order = order;
2070 goto unlock;
2071 }
2072 xas_split(&xas, old, order);
2073
2074 /*
2075 * Re-set the swap entries after splitting: the swap offsets of
2076 * the original large entry are contiguous, one per slot.
2077 */
2078 for (i = 0; i < 1 << order; i++) {
2079 pgoff_t aligned_index = round_down(index, 1 << order);
2080 swp_entry_t tmp;
2081
2082 tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
2083 __xa_store(&mapping->i_pages, aligned_index + i,
2084 swp_to_radix_entry(tmp), 0);
2085 }
2086 }
2087
2088 unlock:
2089 xas_unlock_irq(&xas);
2090
2091 /* split needed, alloc here and retry. */
2092 if (split_order) {
2093 xas_split_alloc(&xas, old, split_order, gfp);
2094 if (xas_error(&xas))
2095 goto error;
2096 alloced_shadow = old;
2097 alloced_order = split_order;
2098 xas_reset(&xas);
2099 continue;
2100 }
2101
2102 if (!xas_nomem(&xas, gfp))
2103 break;
2104 }
2105
2106 error:
2107 if (xas_error(&xas))
2108 return xas_error(&xas);
2109
2110 return alloced_order;
2111 }
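/*
 * Worked example (illustrative): splitting an order-4 swap entry based
 * at index 32 replaces the single large entry with 16 order-0 entries
 * at indices 32..47, where slot 32 + i carries the original swap type
 * with swp_offset(swap) + i, preserving the contiguous offsets.
 */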
2112
2113 /*
2114 * Swap in the folio pointed to by *foliop.
2115 * Caller has to make sure that *foliop contains a valid swapped folio.
2116 * Returns 0 and the folio in *foliop on success. On failure, returns
2117 * the error code and NULL in *foliop.
2118 */
2119 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2120 struct folio **foliop, enum sgp_type sgp,
2121 gfp_t gfp, struct vm_area_struct *vma,
2122 vm_fault_t *fault_type)
2123 {
2124 struct address_space *mapping = inode->i_mapping;
2125 struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2126 struct shmem_inode_info *info = SHMEM_I(inode);
2127 struct swap_info_struct *si;
2128 struct folio *folio = NULL;
2129 swp_entry_t swap;
2130 int error, nr_pages;
2131
2132 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2133 swap = radix_to_swp_entry(*foliop);
2134 *foliop = NULL;
2135
2136 if (is_poisoned_swp_entry(swap))
2137 return -EIO;
2138
2139 si = get_swap_device(swap);
2140 if (!si) {
2141 if (!shmem_confirm_swap(mapping, index, swap))
2142 return -EEXIST;
2143 else
2144 return -EINVAL;
2145 }
2146
2147 /* Look it up and read it in.. */
2148 folio = swap_cache_get_folio(swap, NULL, 0);
2149 if (!folio) {
2150 int split_order;
2151
2152 /* Or update major stats only when swapin succeeds?? */
2153 if (fault_type) {
2154 *fault_type |= VM_FAULT_MAJOR;
2155 count_vm_event(PGMAJFAULT);
2156 count_memcg_event_mm(fault_mm, PGMAJFAULT);
2157 }
2158
2159 /*
2160 * For now the swap device can only swap in order-0 folios, so we
2161 * must split the large swap entry stored in the pagecache if
2162 * necessary.
2163 */
2164 split_order = shmem_split_large_entry(inode, index, swap, gfp);
2165 if (split_order < 0) {
2166 error = split_order;
2167 goto failed;
2168 }
2169
2170 /*
2171 * If the large swap entry has already been split, it is
2172 * necessary to recalculate the new swap entry based on
2173 * the old order alignment.
2174 */
2175 if (split_order > 0) {
2176 pgoff_t offset = index - round_down(index, 1 << split_order);
2177
2178 swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2179 }
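/*
 * E.g. (illustrative) index == 21 after an order-4 split:
 * offset == 21 - round_down(21, 16) == 5, so the entry for
 * this index is the base swap entry's offset plus 5.
 */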
2180
2181 /* Here we actually start the io */
2182 folio = shmem_swapin_cluster(swap, gfp, info, index);
2183 if (!folio) {
2184 error = -ENOMEM;
2185 goto failed;
2186 }
2187 }
2188
2189 /* We have to do this with folio locked to prevent races */
2190 folio_lock(folio);
2191 if (!folio_test_swapcache(folio) ||
2192 folio->swap.val != swap.val ||
2193 !shmem_confirm_swap(mapping, index, swap)) {
2194 error = -EEXIST;
2195 goto unlock;
2196 }
2197 if (!folio_test_uptodate(folio)) {
2198 error = -EIO;
2199 goto failed;
2200 }
2201 folio_wait_writeback(folio);
2202 nr_pages = folio_nr_pages(folio);
2203
2204 /*
2205 * Some architectures may have to restore extra metadata to the
2206 * folio after reading from swap.
2207 */
2208 arch_swap_restore(folio_swap(swap, folio), folio);
2209
2210 if (shmem_should_replace_folio(folio, gfp)) {
2211 error = shmem_replace_folio(&folio, gfp, info, index, vma);
2212 if (error)
2213 goto failed;
2214 }
2215
2216 error = shmem_add_to_page_cache(folio, mapping,
2217 round_down(index, nr_pages),
2218 swp_to_radix_entry(swap), gfp);
2219 if (error)
2220 goto failed;
2221
2222 shmem_recalc_inode(inode, 0, -nr_pages);
2223
2224 if (sgp == SGP_WRITE)
2225 folio_mark_accessed(folio);
2226
2227 delete_from_swap_cache(folio);
2228 folio_mark_dirty(folio);
2229 swap_free_nr(swap, nr_pages);
2230 put_swap_device(si);
2231
2232 *foliop = folio;
2233 return 0;
2234 failed:
2235 if (!shmem_confirm_swap(mapping, index, swap))
2236 error = -EEXIST;
2237 if (error == -EIO)
2238 shmem_set_folio_swapin_error(inode, index, folio, swap);
2239 unlock:
2240 if (folio) {
2241 folio_unlock(folio);
2242 folio_put(folio);
2243 }
2244 put_swap_device(si);
2245
2246 return error;
2247 }
2248
2249 /*
2250 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2251 *
2252 * If we allocate a new one we do not mark it dirty. That's up to the
2253 * vm. If we swap it in we mark it dirty, since we also free the swap
2254 * entry: a page cannot live in both the swap and page cache.
2255 *
2256 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2257 */
2258 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2259 loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2260 gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2261 {
2262 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2263 struct mm_struct *fault_mm;
2264 struct folio *folio;
2265 int error;
2266 bool alloced;
2267 unsigned long orders = 0;
2268
2269 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2270 return -EINVAL;
2271
2272 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2273 return -EFBIG;
2274 repeat:
2275 if (sgp <= SGP_CACHE &&
2276 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2277 return -EINVAL;
2278
2279 alloced = false;
2280 fault_mm = vma ? vma->vm_mm : NULL;
2281
2282 folio = filemap_get_entry(inode->i_mapping, index);
2283 if (folio && vma && userfaultfd_minor(vma)) {
2284 if (!xa_is_value(folio))
2285 folio_put(folio);
2286 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2287 return 0;
2288 }
2289
2290 if (xa_is_value(folio)) {
2291 error = shmem_swapin_folio(inode, index, &folio,
2292 sgp, gfp, vma, fault_type);
2293 if (error == -EEXIST)
2294 goto repeat;
2295
2296 *foliop = folio;
2297 return error;
2298 }
2299
2300 if (folio) {
2301 folio_lock(folio);
2302
2303 /* Has the folio been truncated or swapped out? */
2304 if (unlikely(folio->mapping != inode->i_mapping)) {
2305 folio_unlock(folio);
2306 folio_put(folio);
2307 goto repeat;
2308 }
2309 if (sgp == SGP_WRITE)
2310 folio_mark_accessed(folio);
2311 if (folio_test_uptodate(folio))
2312 goto out;
2313 /* fallocated folio */
2314 if (sgp != SGP_READ)
2315 goto clear;
2316 folio_unlock(folio);
2317 folio_put(folio);
2318 }
2319
2320 /*
2321 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2322 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2323 */
2324 *foliop = NULL;
2325 if (sgp == SGP_READ)
2326 return 0;
2327 if (sgp == SGP_NOALLOC)
2328 return -ENOENT;
2329
2330 /*
2331 * Fast cache lookup and swap lookup did not find it: allocate.
2332 */
2333
2334 if (vma && userfaultfd_missing(vma)) {
2335 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2336 return 0;
2337 }
2338
2339 /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2340 orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2341 if (orders > 0) {
2342 gfp_t huge_gfp;
2343
2344 huge_gfp = vma_thp_gfp_mask(vma);
2345 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2346 folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2347 inode, index, fault_mm, orders);
2348 if (!IS_ERR(folio)) {
2349 if (folio_test_pmd_mappable(folio))
2350 count_vm_event(THP_FILE_ALLOC);
2351 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2352 goto alloced;
2353 }
2354 if (PTR_ERR(folio) == -EEXIST)
2355 goto repeat;
2356 }
2357
2358 folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2359 if (IS_ERR(folio)) {
2360 error = PTR_ERR(folio);
2361 if (error == -EEXIST)
2362 goto repeat;
2363 folio = NULL;
2364 goto unlock;
2365 }
2366
2367 alloced:
2368 alloced = true;
2369 if (folio_test_large(folio) &&
2370 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2371 folio_next_index(folio)) {
2372 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2373 struct shmem_inode_info *info = SHMEM_I(inode);
2374 /*
2375 * Part of the large folio is beyond i_size: subject
2376 * to shrink under memory pressure.
2377 */
2378 spin_lock(&sbinfo->shrinklist_lock);
2379 /*
2380 * _careful to defend against unlocked access to
2381 * ->shrink_list in shmem_unused_huge_shrink()
2382 */
2383 if (list_empty_careful(&info->shrinklist)) {
2384 list_add_tail(&info->shrinklist,
2385 &sbinfo->shrinklist);
2386 sbinfo->shrinklist_len++;
2387 }
2388 spin_unlock(&sbinfo->shrinklist_lock);
2389 }
2390
2391 if (sgp == SGP_WRITE)
2392 folio_set_referenced(folio);
2393 /*
2394 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2395 */
2396 if (sgp == SGP_FALLOC)
2397 sgp = SGP_WRITE;
2398 clear:
2399 /*
2400 * Let SGP_WRITE caller clear ends if write does not fill folio;
2401 * but SGP_FALLOC on a folio fallocated earlier must initialize
2402 * it now, lest undo on failure cancel our earlier guarantee.
2403 */
2404 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2405 long i, n = folio_nr_pages(folio);
2406
2407 for (i = 0; i < n; i++)
2408 clear_highpage(folio_page(folio, i));
2409 flush_dcache_folio(folio);
2410 folio_mark_uptodate(folio);
2411 }
2412
2413 /* Perhaps the file has been truncated since we checked */
2414 if (sgp <= SGP_CACHE &&
2415 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2416 error = -EINVAL;
2417 goto unlock;
2418 }
2419 out:
2420 *foliop = folio;
2421 return 0;
2422
2423 /*
2424 * Error recovery.
2425 */
2426 unlock:
2427 if (alloced)
2428 filemap_remove_folio(folio);
2429 shmem_recalc_inode(inode, 0, 0);
2430 if (folio) {
2431 folio_unlock(folio);
2432 folio_put(folio);
2433 }
2434 return error;
2435 }
2436
2437 /**
2438 * shmem_get_folio - find, and lock a shmem folio.
2439 * @inode: inode to search
2440 * @index: the page index.
2441 * @write_end: end of a write, could extend inode size
2442 * @foliop: pointer to the folio if found
2443 * @sgp: SGP_* flags to control behavior
2444 *
2445 * Looks up the page cache entry at @inode & @index. If a folio is
2446 * present, it is returned locked with an increased refcount.
2447 *
2448 * If the caller modifies data in the folio, it must call folio_mark_dirty()
2449 * before unlocking the folio to ensure that the folio is not reclaimed.
2450 * There is no need to reserve space before calling folio_mark_dirty().
2451 *
2452 * When no folio is found, the behavior depends on @sgp:
2453 * - for SGP_READ, *@foliop is %NULL and 0 is returned
2454 * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2455 * - for all other flags a new folio is allocated, inserted into the
2456 * page cache and returned locked in @foliop.
2457 *
2458 * Context: May sleep.
2459 * Return: 0 if successful, else a negative error code.
2460 */
2461 int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2462 struct folio **foliop, enum sgp_type sgp)
2463 {
2464 return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2465 mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2466 }
2467 EXPORT_SYMBOL_GPL(shmem_get_folio);
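/*
 * Minimal usage sketch (illustrative, not from the original source): a
 * hypothetical in-kernel caller filling one page of a shmem inode it
 * owns, following the locking rules documented above.
 *
 *   struct folio *folio;
 *   int err = shmem_get_folio(inode, index, 0, &folio, SGP_CACHE);
 *   if (err)
 *           return err;
 *   memset(folio_address(folio), 0, PAGE_SIZE);
 *   folio_mark_dirty(folio);        no space reservation needed
 *   folio_unlock(folio);
 *   folio_put(folio);
 */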
2468
2469 /*
2470 * This is like autoremove_wake_function, but it removes the wait queue
2471 * entry unconditionally - even if something else had already woken the
2472 * target.
2473 */
2474 static int synchronous_wake_function(wait_queue_entry_t *wait,
2475 unsigned int mode, int sync, void *key)
2476 {
2477 int ret = default_wake_function(wait, mode, sync, key);
2478 list_del_init(&wait->entry);
2479 return ret;
2480 }
2481
2482 /*
2483 * Trinity finds that probing a hole which tmpfs is punching can
2484 * prevent the hole-punch from ever completing: which in turn
2485 * locks writers out with its hold on i_rwsem. So refrain from
2486 * faulting pages into the hole while it's being punched. Although
2487 * shmem_undo_range() does remove the additions, it may be unable to
2488 * keep up, as each new page needs its own unmap_mapping_range() call,
2489 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2490 *
2491 * It does not matter if we sometimes reach this check just before the
2492 * hole-punch begins, so that one fault then races with the punch:
2493 * we just need to make racing faults a rare case.
2494 *
2495 * The implementation below would be much simpler if we just used a
2496 * standard mutex or completion: but we cannot take i_rwsem in fault,
2497 * and bloating every shmem inode for this unlikely case would be sad.
2498 */
2499 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2500 {
2501 struct shmem_falloc *shmem_falloc;
2502 struct file *fpin = NULL;
2503 vm_fault_t ret = 0;
2504
2505 spin_lock(&inode->i_lock);
2506 shmem_falloc = inode->i_private;
2507 if (shmem_falloc &&
2508 shmem_falloc->waitq &&
2509 vmf->pgoff >= shmem_falloc->start &&
2510 vmf->pgoff < shmem_falloc->next) {
2511 wait_queue_head_t *shmem_falloc_waitq;
2512 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2513
2514 ret = VM_FAULT_NOPAGE;
2515 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2516 shmem_falloc_waitq = shmem_falloc->waitq;
2517 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2518 TASK_UNINTERRUPTIBLE);
2519 spin_unlock(&inode->i_lock);
2520 schedule();
2521
2522 /*
2523 * shmem_falloc_waitq points into the shmem_fallocate()
2524 * stack of the hole-punching task: shmem_falloc_waitq
2525 * is usually invalid by the time we reach here, but
2526 * finish_wait() does not dereference it in that case;
2527 * though i_lock is needed lest we race with wake_up_all().
2528 */
2529 spin_lock(&inode->i_lock);
2530 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2531 }
2532 spin_unlock(&inode->i_lock);
2533 if (fpin) {
2534 fput(fpin);
2535 ret = VM_FAULT_RETRY;
2536 }
2537 return ret;
2538 }
2539
2540 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2541 {
2542 struct inode *inode = file_inode(vmf->vma->vm_file);
2543 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2544 struct folio *folio = NULL;
2545 vm_fault_t ret = 0;
2546 int err;
2547
2548 /*
2549 * Trinity finds that probing a hole which tmpfs is punching can
2550 * prevent the hole-punch from ever completing: noted in i_private.
2551 */
2552 if (unlikely(inode->i_private)) {
2553 ret = shmem_falloc_wait(vmf, inode);
2554 if (ret)
2555 return ret;
2556 }
2557
2558 WARN_ON_ONCE(vmf->page != NULL);
2559 err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2560 gfp, vmf, &ret);
2561 if (err)
2562 return vmf_error(err);
2563 if (folio) {
2564 vmf->page = folio_file_page(folio, vmf->pgoff);
2565 ret |= VM_FAULT_LOCKED;
2566 }
2567 return ret;
2568 }
2569
2570 unsigned long shmem_get_unmapped_area(struct file *file,
2571 unsigned long uaddr, unsigned long len,
2572 unsigned long pgoff, unsigned long flags)
2573 {
2574 unsigned long addr;
2575 unsigned long offset;
2576 unsigned long inflated_len;
2577 unsigned long inflated_addr;
2578 unsigned long inflated_offset;
2579 unsigned long hpage_size;
2580
2581 if (len > TASK_SIZE)
2582 return -ENOMEM;
2583
2584 addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff,
2585 flags);
2586
2587 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2588 return addr;
2589 if (IS_ERR_VALUE(addr))
2590 return addr;
2591 if (addr & ~PAGE_MASK)
2592 return addr;
2593 if (addr > TASK_SIZE - len)
2594 return addr;
2595
2596 if (shmem_huge == SHMEM_HUGE_DENY)
2597 return addr;
2598 if (flags & MAP_FIXED)
2599 return addr;
2600 /*
2601 * Our priority is to support MAP_SHARED mapped hugely;
2602 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2603 * But if caller specified an address hint and we allocated area there
2604 * successfully, respect that as before.
2605 */
2606 if (uaddr == addr)
2607 return addr;
2608
2609 hpage_size = HPAGE_PMD_SIZE;
2610 if (shmem_huge != SHMEM_HUGE_FORCE) {
2611 struct super_block *sb;
2612 unsigned long __maybe_unused hpage_orders;
2613 int order = 0;
2614
2615 if (file) {
2616 VM_BUG_ON(file->f_op != &shmem_file_operations);
2617 sb = file_inode(file)->i_sb;
2618 } else {
2619 /*
2620 * Called directly from mm/mmap.c, or drivers/char/mem.c
2621 * for "/dev/zero", to create a shared anonymous object.
2622 */
2623 if (IS_ERR(shm_mnt))
2624 return addr;
2625 sb = shm_mnt->mnt_sb;
2626
2627 /*
2628 * Find the highest mTHP order enabled for anonymous shmem to
2629 * provide a suitably aligned address.
2630 */
2631 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2632 hpage_orders = READ_ONCE(huge_shmem_orders_always);
2633 hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2634 hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2635 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2636 hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2637
2638 if (hpage_orders > 0) {
2639 order = highest_order(hpage_orders);
2640 hpage_size = PAGE_SIZE << order;
2641 }
2642 #endif
2643 }
2644 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2645 return addr;
2646 }
2647
2648 if (len < hpage_size)
2649 return addr;
2650
2651 offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2652 if (offset && offset + len < 2 * hpage_size)
2653 return addr;
2654 if ((addr & (hpage_size - 1)) == offset)
2655 return addr;
2656
2657 inflated_len = len + hpage_size - PAGE_SIZE;
2658 if (inflated_len > TASK_SIZE)
2659 return addr;
2660 if (inflated_len < len)
2661 return addr;
2662
2663 inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr,
2664 inflated_len, 0, flags);
2665 if (IS_ERR_VALUE(inflated_addr))
2666 return addr;
2667 if (inflated_addr & ~PAGE_MASK)
2668 return addr;
2669
2670 inflated_offset = inflated_addr & (hpage_size - 1);
2671 inflated_addr += offset - inflated_offset;
2672 if (inflated_offset > offset)
2673 inflated_addr += hpage_size;
2674
2675 if (inflated_addr > TASK_SIZE - len)
2676 return addr;
2677 return inflated_addr;
2678 }
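/*
 * Worked example of the inflation above (illustrative): with 4K pages,
 * hpage_size == 2M, pgoff == 0 and len == 4M, inflated_len is
 * 4M + 2M - 4K. If the inflated area lands at 0x7f1234567000 then
 * inflated_offset == 0x167000 > offset (0), so the address is rounded
 * up to the next 2M boundary, 0x7f1234600000: a PMD-aligned start that
 * still leaves room for len bytes within the inflated area.
 */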
2679
2680 #ifdef CONFIG_NUMA
2681 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2682 {
2683 struct inode *inode = file_inode(vma->vm_file);
2684 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2685 }
2686
2687 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2688 unsigned long addr, pgoff_t *ilx)
2689 {
2690 struct inode *inode = file_inode(vma->vm_file);
2691 pgoff_t index;
2692
2693 /*
2694 * Bias interleave by inode number to distribute better across nodes;
2695 * but this interface is independent of which page order is used, so
2696 * supplies only that bias, letting caller apply the offset (adjusted
2697 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2698 */
2699 *ilx = inode->i_ino;
2700 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2701 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2702 }
2703
2704 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2705 pgoff_t index, unsigned int order, pgoff_t *ilx)
2706 {
2707 struct mempolicy *mpol;
2708
2709 /* Bias interleave by inode number to distribute better across nodes */
2710 *ilx = info->vfs_inode.i_ino + (index >> order);
2711
2712 mpol = mpol_shared_policy_lookup(&info->policy, index);
2713 return mpol ? mpol : get_task_policy(current);
2714 }
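/*
 * Example (illustrative): with MPOL_INTERLEAVE across several nodes,
 * the i_ino bias means two inodes' pages start interleaving from
 * (roughly) different nodes rather than all hammering node 0; for an
 * order-9 folio, (index >> 9) ensures every subpage of one huge folio
 * shares a single ilx and therefore a single node choice.
 */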
2715 #else
2716 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2717 pgoff_t index, unsigned int order, pgoff_t *ilx)
2718 {
2719 *ilx = 0;
2720 return NULL;
2721 }
2722 #endif /* CONFIG_NUMA */
2723
2724 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2725 {
2726 struct inode *inode = file_inode(file);
2727 struct shmem_inode_info *info = SHMEM_I(inode);
2728 int retval = -ENOMEM;
2729
2730 /*
2731 * What serializes the accesses to info->flags?
2732 * ipc_lock_object() when called from shmctl_do_lock(),
2733 * no serialization needed when called from shm_destroy().
2734 */
2735 if (lock && !(info->flags & VM_LOCKED)) {
2736 if (!user_shm_lock(inode->i_size, ucounts))
2737 goto out_nomem;
2738 info->flags |= VM_LOCKED;
2739 mapping_set_unevictable(file->f_mapping);
2740 }
2741 if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2742 user_shm_unlock(inode->i_size, ucounts);
2743 info->flags &= ~VM_LOCKED;
2744 mapping_clear_unevictable(file->f_mapping);
2745 }
2746 retval = 0;
2747
2748 out_nomem:
2749 return retval;
2750 }
2751
2752 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2753 {
2754 struct inode *inode = file_inode(file);
2755 struct shmem_inode_info *info = SHMEM_I(inode);
2756 int ret;
2757
2758 ret = seal_check_write(info->seals, vma);
2759 if (ret)
2760 return ret;
2761
2762 file_accessed(file);
2763 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2764 if (inode->i_nlink)
2765 vma->vm_ops = &shmem_vm_ops;
2766 else
2767 vma->vm_ops = &shmem_anon_vm_ops;
2768 return 0;
2769 }
2770
2771 static int shmem_file_open(struct inode *inode, struct file *file)
2772 {
2773 file->f_mode |= FMODE_CAN_ODIRECT;
2774 return generic_file_open(inode, file);
2775 }
2776
2777 #ifdef CONFIG_TMPFS_XATTR
2778 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2779
2780 #if IS_ENABLED(CONFIG_UNICODE)
2781 /*
2782 * shmem_inode_casefold_flags - Deal with casefold file attribute flag
2783 *
2784 * The casefold file attribute needs some special checks. It can only be
2785 * added to an empty dir, and can't be removed from a non-empty dir.
2786 */
2787 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2788 struct dentry *dentry, unsigned int *i_flags)
2789 {
2790 unsigned int old = inode->i_flags;
2791 struct super_block *sb = inode->i_sb;
2792
2793 if (fsflags & FS_CASEFOLD_FL) {
2794 if (!(old & S_CASEFOLD)) {
2795 if (!sb->s_encoding)
2796 return -EOPNOTSUPP;
2797
2798 if (!S_ISDIR(inode->i_mode))
2799 return -ENOTDIR;
2800
2801 if (dentry && !simple_empty(dentry))
2802 return -ENOTEMPTY;
2803 }
2804
2805 *i_flags = *i_flags | S_CASEFOLD;
2806 } else if (old & S_CASEFOLD) {
2807 if (dentry && !simple_empty(dentry))
2808 return -ENOTEMPTY;
2809 }
2810
2811 return 0;
2812 }
2813 #else
2814 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2815 struct dentry *dentry, unsigned int *i_flags)
2816 {
2817 if (fsflags & FS_CASEFOLD_FL)
2818 return -EOPNOTSUPP;
2819
2820 return 0;
2821 }
2822 #endif
2823
2824 /*
2825 * chattr's fsflags are unrelated to extended attributes,
2826 * but tmpfs has chosen to enable them under the same config option.
2827 */
2828 static int shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
2829 {
2830 unsigned int i_flags = 0;
2831 int ret;
2832
2833 ret = shmem_inode_casefold_flags(inode, fsflags, dentry, &i_flags);
2834 if (ret)
2835 return ret;
2836
2837 if (fsflags & FS_NOATIME_FL)
2838 i_flags |= S_NOATIME;
2839 if (fsflags & FS_APPEND_FL)
2840 i_flags |= S_APPEND;
2841 if (fsflags & FS_IMMUTABLE_FL)
2842 i_flags |= S_IMMUTABLE;
2843 /*
2844 * But FS_NODUMP_FL does not require any action in i_flags.
2845 */
2846 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE | S_CASEFOLD);
2847
2848 return 0;
2849 }
2850 #else
2851 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
2852 {
2853 }
2854 #define shmem_initxattrs NULL
2855 #endif
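/*
 * Userspace view of the fsflags handling above (a sketch, assuming a
 * tmpfs mounted with casefold support): chattr-style flags travel via
 * FS_IOC_GETFLAGS/FS_IOC_SETFLAGS.
 *
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/fs.h>
 *
 *   int attr;
 *   int fd = open("/tmp/dir", O_RDONLY | O_DIRECTORY);
 *   ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *   attr |= FS_CASEFOLD_FL;
 *   ioctl(fd, FS_IOC_SETFLAGS, &attr);      fails with ENOTEMPTY
 *                                           unless the dir is empty
 */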
2856
2857 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2858 {
2859 return &SHMEM_I(inode)->dir_offsets;
2860 }
2861
2862 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2863 struct super_block *sb,
2864 struct inode *dir, umode_t mode,
2865 dev_t dev, unsigned long flags)
2866 {
2867 struct inode *inode;
2868 struct shmem_inode_info *info;
2869 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2870 ino_t ino;
2871 int err;
2872
2873 err = shmem_reserve_inode(sb, &ino);
2874 if (err)
2875 return ERR_PTR(err);
2876
2877 inode = new_inode(sb);
2878 if (!inode) {
2879 shmem_free_inode(sb, 0);
2880 return ERR_PTR(-ENOSPC);
2881 }
2882
2883 inode->i_ino = ino;
2884 inode_init_owner(idmap, inode, dir, mode);
2885 inode->i_blocks = 0;
2886 simple_inode_init_ts(inode);
2887 inode->i_generation = get_random_u32();
2888 info = SHMEM_I(inode);
2889 memset(info, 0, (char *)inode - (char *)info);
2890 spin_lock_init(&info->lock);
2891 atomic_set(&info->stop_eviction, 0);
2892 info->seals = F_SEAL_SEAL;
2893 info->flags = flags & VM_NORESERVE;
2894 info->i_crtime = inode_get_mtime(inode);
2895 info->fsflags = (dir == NULL) ? 0 :
2896 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2897 if (info->fsflags)
2898 shmem_set_inode_flags(inode, info->fsflags, NULL);
2899 INIT_LIST_HEAD(&info->shrinklist);
2900 INIT_LIST_HEAD(&info->swaplist);
2901 simple_xattrs_init(&info->xattrs);
2902 cache_no_acl(inode);
2903 if (sbinfo->noswap)
2904 mapping_set_unevictable(inode->i_mapping);
2905
2906 /* Don't consider 'deny' for emergencies and 'force' for testing */
2907 if (sbinfo->huge)
2908 mapping_set_large_folios(inode->i_mapping);
2909
2910 switch (mode & S_IFMT) {
2911 default:
2912 inode->i_op = &shmem_special_inode_operations;
2913 init_special_inode(inode, mode, dev);
2914 break;
2915 case S_IFREG:
2916 inode->i_mapping->a_ops = &shmem_aops;
2917 inode->i_op = &shmem_inode_operations;
2918 inode->i_fop = &shmem_file_operations;
2919 mpol_shared_policy_init(&info->policy,
2920 shmem_get_sbmpol(sbinfo));
2921 break;
2922 case S_IFDIR:
2923 inc_nlink(inode);
2924 /* Some things misbehave if size == 0 on a directory */
2925 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2926 inode->i_op = &shmem_dir_inode_operations;
2927 inode->i_fop = &simple_offset_dir_operations;
2928 simple_offset_init(shmem_get_offset_ctx(inode));
2929 break;
2930 case S_IFLNK:
2931 /*
2932 * Must not load anything into the rbtree,
2933 * as mpol_free_shared_policy will not be called.
2934 */
2935 mpol_shared_policy_init(&info->policy, NULL);
2936 break;
2937 }
2938
2939 lockdep_annotate_inode_mutex_key(inode);
2940 return inode;
2941 }
2942
2943 #ifdef CONFIG_TMPFS_QUOTA
2944 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2945 struct super_block *sb, struct inode *dir,
2946 umode_t mode, dev_t dev, unsigned long flags)
2947 {
2948 int err;
2949 struct inode *inode;
2950
2951 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2952 if (IS_ERR(inode))
2953 return inode;
2954
2955 err = dquot_initialize(inode);
2956 if (err)
2957 goto errout;
2958
2959 err = dquot_alloc_inode(inode);
2960 if (err) {
2961 dquot_drop(inode);
2962 goto errout;
2963 }
2964 return inode;
2965
2966 errout:
2967 inode->i_flags |= S_NOQUOTA;
2968 iput(inode);
2969 return ERR_PTR(err);
2970 }
2971 #else
2972 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2973 struct super_block *sb, struct inode *dir,
2974 umode_t mode, dev_t dev, unsigned long flags)
2975 {
2976 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2977 }
2978 #endif /* CONFIG_TMPFS_QUOTA */
2979
2980 #ifdef CONFIG_USERFAULTFD
2981 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2982 struct vm_area_struct *dst_vma,
2983 unsigned long dst_addr,
2984 unsigned long src_addr,
2985 uffd_flags_t flags,
2986 struct folio **foliop)
2987 {
2988 struct inode *inode = file_inode(dst_vma->vm_file);
2989 struct shmem_inode_info *info = SHMEM_I(inode);
2990 struct address_space *mapping = inode->i_mapping;
2991 gfp_t gfp = mapping_gfp_mask(mapping);
2992 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2993 void *page_kaddr;
2994 struct folio *folio;
2995 int ret;
2996 pgoff_t max_off;
2997
2998 if (shmem_inode_acct_blocks(inode, 1)) {
2999 /*
3000 * We may have got a page, returned -ENOENT triggering a retry,
3001 * and now we find ourselves with -ENOMEM. Release the page, to
3002 * avoid a BUG_ON in our caller.
3003 */
3004 if (unlikely(*foliop)) {
3005 folio_put(*foliop);
3006 *foliop = NULL;
3007 }
3008 return -ENOMEM;
3009 }
3010
3011 if (!*foliop) {
3012 ret = -ENOMEM;
3013 folio = shmem_alloc_folio(gfp, 0, info, pgoff);
3014 if (!folio)
3015 goto out_unacct_blocks;
3016
3017 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
3018 page_kaddr = kmap_local_folio(folio, 0);
3019 /*
3020 * The read mmap_lock is held here. Despite the
3021 * mmap_lock being read-recursive, a deadlock is still
3022 * possible if a writer has taken the lock. For example:
3023 *
3024 * process A thread 1 takes read lock on own mmap_lock
3025 * process A thread 2 calls mmap, blocks taking write lock
3026 * process B thread 1 takes page fault, read lock on own mmap lock
3027 * process B thread 2 calls mmap, blocks taking write lock
3028 * process A thread 1 blocks taking read lock on process B
3029 * process B thread 1 blocks taking read lock on process A
3030 *
3031 * Disable page faults to prevent potential deadlock
3032 * and retry the copy outside the mmap_lock.
3033 */
3034 pagefault_disable();
3035 ret = copy_from_user(page_kaddr,
3036 (const void __user *)src_addr,
3037 PAGE_SIZE);
3038 pagefault_enable();
3039 kunmap_local(page_kaddr);
3040
3041 /* fallback to copy_from_user outside mmap_lock */
3042 if (unlikely(ret)) {
3043 *foliop = folio;
3044 ret = -ENOENT;
3045 /* don't free the page */
3046 goto out_unacct_blocks;
3047 }
3048
3049 flush_dcache_folio(folio);
3050 } else { /* ZEROPAGE */
3051 clear_user_highpage(&folio->page, dst_addr);
3052 }
3053 } else {
3054 folio = *foliop;
3055 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
3056 *foliop = NULL;
3057 }
3058
3059 VM_BUG_ON(folio_test_locked(folio));
3060 VM_BUG_ON(folio_test_swapbacked(folio));
3061 __folio_set_locked(folio);
3062 __folio_set_swapbacked(folio);
3063 __folio_mark_uptodate(folio);
3064
3065 ret = -EFAULT;
3066 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3067 if (unlikely(pgoff >= max_off))
3068 goto out_release;
3069
3070 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
3071 if (ret)
3072 goto out_release;
3073 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
3074 if (ret)
3075 goto out_release;
3076
3077 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
3078 &folio->page, true, flags);
3079 if (ret)
3080 goto out_delete_from_cache;
3081
3082 shmem_recalc_inode(inode, 1, 0);
3083 folio_unlock(folio);
3084 return 0;
3085 out_delete_from_cache:
3086 filemap_remove_folio(folio);
3087 out_release:
3088 folio_unlock(folio);
3089 folio_put(folio);
3090 out_unacct_blocks:
3091 shmem_inode_unacct_blocks(inode, 1);
3092 return ret;
3093 }
3094 #endif /* CONFIG_USERFAULTFD */
3095
3096 #ifdef CONFIG_TMPFS
3097 static const struct inode_operations shmem_symlink_inode_operations;
3098 static const struct inode_operations shmem_short_symlink_operations;
3099
3100 static int
3101 shmem_write_begin(struct file *file, struct address_space *mapping,
3102 loff_t pos, unsigned len,
3103 struct folio **foliop, void **fsdata)
3104 {
3105 struct inode *inode = mapping->host;
3106 struct shmem_inode_info *info = SHMEM_I(inode);
3107 pgoff_t index = pos >> PAGE_SHIFT;
3108 struct folio *folio;
3109 int ret = 0;
3110
3111 /* i_rwsem is held by caller */
3112 if (unlikely(info->seals & (F_SEAL_GROW |
3113 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3114 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3115 return -EPERM;
3116 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3117 return -EPERM;
3118 }
3119
3120 ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3121 if (ret)
3122 return ret;
3123
3124 if (folio_test_hwpoison(folio) ||
3125 (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
3126 folio_unlock(folio);
3127 folio_put(folio);
3128 return -EIO;
3129 }
3130
3131 *foliop = folio;
3132 return 0;
3133 }
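/*
 * The seals tested above are set from userspace on a memfd; a minimal
 * sketch (illustrative):
 *
 *   #define _GNU_SOURCE
 *   #include <sys/mman.h>
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *
 *   int fd = memfd_create("buf", MFD_ALLOW_SEALING);
 *   ftruncate(fd, 4096);
 *   fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_WRITE);
 *
 * After sealing, write(fd, ...) fails with EPERM: the -EPERM return in
 * shmem_write_begin() above.
 */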
3134
3135 static int
3136 shmem_write_end(struct file *file, struct address_space *mapping,
3137 loff_t pos, unsigned len, unsigned copied,
3138 struct folio *folio, void *fsdata)
3139 {
3140 struct inode *inode = mapping->host;
3141
3142 if (pos + copied > inode->i_size)
3143 i_size_write(inode, pos + copied);
3144
3145 if (!folio_test_uptodate(folio)) {
3146 if (copied < folio_size(folio)) {
3147 size_t from = offset_in_folio(folio, pos);
3148 folio_zero_segments(folio, 0, from,
3149 from + copied, folio_size(folio));
3150 }
3151 folio_mark_uptodate(folio);
3152 }
3153 folio_mark_dirty(folio);
3154 folio_unlock(folio);
3155 folio_put(folio);
3156
3157 return copied;
3158 }
3159
3160 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3161 {
3162 struct file *file = iocb->ki_filp;
3163 struct inode *inode = file_inode(file);
3164 struct address_space *mapping = inode->i_mapping;
3165 pgoff_t index;
3166 unsigned long offset;
3167 int error = 0;
3168 ssize_t retval = 0;
3169
3170 for (;;) {
3171 struct folio *folio = NULL;
3172 struct page *page = NULL;
3173 unsigned long nr, ret;
3174 loff_t end_offset, i_size = i_size_read(inode);
3175 bool fallback_page_copy = false;
3176 size_t fsize;
3177
3178 if (unlikely(iocb->ki_pos >= i_size))
3179 break;
3180
3181 index = iocb->ki_pos >> PAGE_SHIFT;
3182 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3183 if (error) {
3184 if (error == -EINVAL)
3185 error = 0;
3186 break;
3187 }
3188 if (folio) {
3189 folio_unlock(folio);
3190
3191 page = folio_file_page(folio, index);
3192 if (PageHWPoison(page)) {
3193 folio_put(folio);
3194 error = -EIO;
3195 break;
3196 }
3197
3198 if (folio_test_large(folio) &&
3199 folio_test_has_hwpoisoned(folio))
3200 fallback_page_copy = true;
3201 }
3202
3203 /*
3204 * We must re-evaluate i_size afterwards, since reads (unlike writes)
3205 * are called without i_rwsem protection against truncate
3206 */
3207 i_size = i_size_read(inode);
3208 if (unlikely(iocb->ki_pos >= i_size)) {
3209 if (folio)
3210 folio_put(folio);
3211 break;
3212 }
3213 end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count);
3214 if (folio && likely(!fallback_page_copy))
3215 fsize = folio_size(folio);
3216 else
3217 fsize = PAGE_SIZE;
3218 offset = iocb->ki_pos & (fsize - 1);
3219 nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset);
3220
3221 if (folio) {
3222 /*
3223 * If users can be writing to this page using arbitrary
3224 * virtual addresses, take care about potential aliasing
3225 * before reading the page on the kernel side.
3226 */
3227 if (mapping_writably_mapped(mapping)) {
3228 if (likely(!fallback_page_copy))
3229 flush_dcache_folio(folio);
3230 else
3231 flush_dcache_page(page);
3232 }
3233
3234 /*
3235 * Mark the folio accessed if we read the beginning.
3236 */
3237 if (!offset)
3238 folio_mark_accessed(folio);
3239 /*
3240 * Ok, we have the page, and it's up-to-date, so
3241 * now we can copy it to user space...
3242 */
3243 if (likely(!fallback_page_copy))
3244 ret = copy_folio_to_iter(folio, offset, nr, to);
3245 else
3246 ret = copy_page_to_iter(page, offset, nr, to);
3247 folio_put(folio);
3248 } else if (user_backed_iter(to)) {
3249 /*
3250 * Copy to user tends to be so well optimized, but
3251 * clear_user() not so much, that it is noticeably
3252 * faster to copy the zero page instead of clearing.
3253 */
3254 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3255 } else {
3256 /*
3257 * But submitting the same page twice in a row to
3258 * splice() - or others? - can result in confusion:
3259 * so don't attempt that optimization on pipes etc.
3260 */
3261 ret = iov_iter_zero(nr, to);
3262 }
3263
3264 retval += ret;
3265 iocb->ki_pos += ret;
3266
3267 if (!iov_iter_count(to))
3268 break;
3269 if (ret < nr) {
3270 error = -EFAULT;
3271 break;
3272 }
3273 cond_resched();
3274 }
3275
3276 file_accessed(file);
3277 return retval ? retval : error;
3278 }
3279
3280 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3281 {
3282 struct file *file = iocb->ki_filp;
3283 struct inode *inode = file->f_mapping->host;
3284 ssize_t ret;
3285
3286 inode_lock(inode);
3287 ret = generic_write_checks(iocb, from);
3288 if (ret <= 0)
3289 goto unlock;
3290 ret = file_remove_privs(file);
3291 if (ret)
3292 goto unlock;
3293 ret = file_update_time(file);
3294 if (ret)
3295 goto unlock;
3296 ret = generic_perform_write(iocb, from);
3297 unlock:
3298 inode_unlock(inode);
3299 return ret;
3300 }
3301
3302 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3303 struct pipe_buffer *buf)
3304 {
3305 return true;
3306 }
3307
3308 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3309 struct pipe_buffer *buf)
3310 {
3311 }
3312
3313 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3314 struct pipe_buffer *buf)
3315 {
3316 return false;
3317 }
3318
3319 static const struct pipe_buf_operations zero_pipe_buf_ops = {
3320 .release = zero_pipe_buf_release,
3321 .try_steal = zero_pipe_buf_try_steal,
3322 .get = zero_pipe_buf_get,
3323 };
3324
3325 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3326 loff_t fpos, size_t size)
3327 {
3328 size_t offset = fpos & ~PAGE_MASK;
3329
3330 size = min_t(size_t, size, PAGE_SIZE - offset);
3331
3332 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
3333 struct pipe_buffer *buf = pipe_head_buf(pipe);
3334
3335 *buf = (struct pipe_buffer) {
3336 .ops = &zero_pipe_buf_ops,
3337 .page = ZERO_PAGE(0),
3338 .offset = offset,
3339 .len = size,
3340 };
3341 pipe->head++;
3342 }
3343
3344 return size;
3345 }
3346
3347 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3348 struct pipe_inode_info *pipe,
3349 size_t len, unsigned int flags)
3350 {
3351 struct inode *inode = file_inode(in);
3352 struct address_space *mapping = inode->i_mapping;
3353 struct folio *folio = NULL;
3354 size_t total_spliced = 0, used, npages, n, part;
3355 loff_t isize;
3356 int error = 0;
3357
3358 /* Work out how much data we can actually add into the pipe */
3359 used = pipe_occupancy(pipe->head, pipe->tail);
3360 npages = max_t(ssize_t, pipe->max_usage - used, 0);
3361 len = min_t(size_t, len, npages * PAGE_SIZE);
3362
3363 do {
3364 bool fallback_page_splice = false;
3365 struct page *page = NULL;
3366 pgoff_t index;
3367 size_t size;
3368
3369 if (*ppos >= i_size_read(inode))
3370 break;
3371
3372 index = *ppos >> PAGE_SHIFT;
3373 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3374 if (error) {
3375 if (error == -EINVAL)
3376 error = 0;
3377 break;
3378 }
3379 if (folio) {
3380 folio_unlock(folio);
3381
3382 page = folio_file_page(folio, index);
3383 if (PageHWPoison(page)) {
3384 error = -EIO;
3385 break;
3386 }
3387
3388 if (folio_test_large(folio) &&
3389 folio_test_has_hwpoisoned(folio))
3390 fallback_page_splice = true;
3391 }
3392
3393 /*
3394 * i_size must be checked after we know the pages are Uptodate.
3395 *
3396 * Checking i_size after that check allows us to calculate
3397 * the correct value for "nr", which means the zero-filled
3398 * part of the page is not copied back to userspace (unless
3399 * another truncate extends the file - this is desired though).
3400 */
3401 isize = i_size_read(inode);
3402 if (unlikely(*ppos >= isize))
3403 break;
3404 /*
3405 * Fallback to PAGE_SIZE splice if the large folio has hwpoisoned
3406 * pages.
3407 */
3408 size = len;
3409 if (unlikely(fallback_page_splice)) {
3410 size_t offset = *ppos & ~PAGE_MASK;
3411
3412 size = umin(size, PAGE_SIZE - offset);
3413 }
3414 part = min_t(loff_t, isize - *ppos, size);
3415
3416 if (folio) {
3417 /*
3418 * If users can be writing to this page using arbitrary
3419 * virtual addresses, take care about potential aliasing
3420 * before reading the page on the kernel side.
3421 */
3422 if (mapping_writably_mapped(mapping)) {
3423 if (likely(!fallback_page_splice))
3424 flush_dcache_folio(folio);
3425 else
3426 flush_dcache_page(page);
3427 }
3428 folio_mark_accessed(folio);
3429 /*
3430 * Ok, we have the page, and it's up-to-date, so we can
3431 * now splice it into the pipe.
3432 */
3433 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3434 folio_put(folio);
3435 folio = NULL;
3436 } else {
3437 n = splice_zeropage_into_pipe(pipe, *ppos, part);
3438 }
3439
3440 if (!n)
3441 break;
3442 len -= n;
3443 total_spliced += n;
3444 *ppos += n;
3445 in->f_ra.prev_pos = *ppos;
3446 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3447 break;
3448
3449 cond_resched();
3450 } while (len);
3451
3452 if (folio)
3453 folio_put(folio);
3454
3455 file_accessed(in);
3456 return total_spliced ? total_spliced : error;
3457 }
3458
3459 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3460 {
3461 struct address_space *mapping = file->f_mapping;
3462 struct inode *inode = mapping->host;
3463
3464 if (whence != SEEK_DATA && whence != SEEK_HOLE)
3465 return generic_file_llseek_size(file, offset, whence,
3466 MAX_LFS_FILESIZE, i_size_read(inode));
3467 if (offset < 0)
3468 return -ENXIO;
3469
3470 inode_lock(inode);
3471 /* We're holding i_rwsem so we can access i_size directly */
3472 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3473 if (offset >= 0)
3474 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3475 inode_unlock(inode);
3476 return offset;
3477 }
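/*
 * Userspace sketch of the SEEK_DATA/SEEK_HOLE support above
 * (illustrative, given an fd open on a sparse tmpfs file):
 *
 *   #define _GNU_SOURCE
 *   #include <unistd.h>
 *
 *   off_t data = lseek(fd, 0, SEEK_DATA);       first allocated offset
 *   off_t hole = lseek(fd, data, SEEK_HOLE);    end of that data extent
 *
 * Never-written or hole-punched ranges report as holes; offsets at or
 * beyond EOF make both calls fail with ENXIO.
 */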
3478
3479 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3480 loff_t len)
3481 {
3482 struct inode *inode = file_inode(file);
3483 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3484 struct shmem_inode_info *info = SHMEM_I(inode);
3485 struct shmem_falloc shmem_falloc;
3486 pgoff_t start, index, end, undo_fallocend;
3487 int error;
3488
3489 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3490 return -EOPNOTSUPP;
3491
3492 inode_lock(inode);
3493
3494 if (mode & FALLOC_FL_PUNCH_HOLE) {
3495 struct address_space *mapping = file->f_mapping;
3496 loff_t unmap_start = round_up(offset, PAGE_SIZE);
3497 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3498 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3499
3500 /* protected by i_rwsem */
3501 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3502 error = -EPERM;
3503 goto out;
3504 }
3505
3506 shmem_falloc.waitq = &shmem_falloc_waitq;
3507 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3508 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3509 spin_lock(&inode->i_lock);
3510 inode->i_private = &shmem_falloc;
3511 spin_unlock(&inode->i_lock);
3512
3513 if ((u64)unmap_end > (u64)unmap_start)
3514 unmap_mapping_range(mapping, unmap_start,
3515 1 + unmap_end - unmap_start, 0);
3516 shmem_truncate_range(inode, offset, offset + len - 1);
3517 /* No need to unmap again: hole-punching leaves COWed pages */
3518
3519 spin_lock(&inode->i_lock);
3520 inode->i_private = NULL;
3521 wake_up_all(&shmem_falloc_waitq);
3522 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3523 spin_unlock(&inode->i_lock);
3524 error = 0;
3525 goto out;
3526 }
3527
3528 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3529 error = inode_newsize_ok(inode, offset + len);
3530 if (error)
3531 goto out;
3532
3533 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3534 error = -EPERM;
3535 goto out;
3536 }
3537
3538 start = offset >> PAGE_SHIFT;
3539 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3540 /* Try to avoid a swapstorm if len is impossible to satisfy */
3541 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3542 error = -ENOSPC;
3543 goto out;
3544 }
3545
3546 shmem_falloc.waitq = NULL;
3547 shmem_falloc.start = start;
3548 shmem_falloc.next = start;
3549 shmem_falloc.nr_falloced = 0;
3550 shmem_falloc.nr_unswapped = 0;
3551 spin_lock(&inode->i_lock);
3552 inode->i_private = &shmem_falloc;
3553 spin_unlock(&inode->i_lock);
3554
3555 /*
3556 * info->fallocend is only relevant when huge pages might be
3557 * involved: to prevent split_huge_page() freeing fallocated
3558 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3559 */
3560 undo_fallocend = info->fallocend;
3561 if (info->fallocend < end)
3562 info->fallocend = end;
3563
3564 for (index = start; index < end; ) {
3565 struct folio *folio;
3566
3567 /*
3568 * Check for fatal signal so that we abort early in OOM
3569 * situations. We don't want to abort in case of non-fatal
3570 * signals as large fallocate can take noticeable time and
3571 * e.g. periodic timers may result in fallocate constantly
3572 * restarting.
3573 */
3574 if (fatal_signal_pending(current))
3575 error = -EINTR;
3576 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3577 error = -ENOMEM;
3578 else
3579 error = shmem_get_folio(inode, index, offset + len,
3580 &folio, SGP_FALLOC);
3581 if (error) {
3582 info->fallocend = undo_fallocend;
3583 /* Remove the !uptodate folios we added */
3584 if (index > start) {
3585 shmem_undo_range(inode,
3586 (loff_t)start << PAGE_SHIFT,
3587 ((loff_t)index << PAGE_SHIFT) - 1, true);
3588 }
3589 goto undone;
3590 }
3591
3592 /*
3593 * Here is a more important optimization than it appears:
3594 * a second SGP_FALLOC on the same large folio will clear it,
3595 * making it uptodate and un-undoable if we fail later.
3596 */
3597 index = folio_next_index(folio);
3598 /* Beware 32-bit wraparound */
3599 if (!index)
3600 index--;
3601
3602 /*
3603 * Inform shmem_writepage() how far we have reached.
3604 * No need for lock or barrier: we have the page lock.
3605 */
3606 if (!folio_test_uptodate(folio))
3607 shmem_falloc.nr_falloced += index - shmem_falloc.next;
3608 shmem_falloc.next = index;
3609
3610 /*
3611 * If !uptodate, leave it that way so that freeable folios
3612 * can be recognized if we need to rollback on error later.
3613 * But mark it dirty so that memory pressure will swap rather
3614 * than free the folios we are allocating (and SGP_CACHE folios
3615 * might still be clean: we now need to mark those dirty too).
3616 */
3617 folio_mark_dirty(folio);
3618 folio_unlock(folio);
3619 folio_put(folio);
3620 cond_resched();
3621 }
3622
3623 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3624 i_size_write(inode, offset + len);
3625 undone:
3626 spin_lock(&inode->i_lock);
3627 inode->i_private = NULL;
3628 spin_unlock(&inode->i_lock);
3629 out:
3630 if (!error)
3631 file_modified(file);
3632 inode_unlock(inode);
3633 return error;
3634 }
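
/*
 * Illustrative userspace sketch (not part of the kernel source): the two
 * modes accepted above.  Note that fallocate(2) requires
 * FALLOC_FL_KEEP_SIZE to accompany FALLOC_FL_PUNCH_HOLE.  The path is a
 * hypothetical example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/shm/blob", O_RDWR | O_CREAT, 0600);
 *
 *		if (fd < 0)
 *			return 1;
 *		// Preallocate 1MiB up front: this fails with ENOSPC now
 *		// rather than at some later page fault.
 *		if (fallocate(fd, 0, 0, 1 << 20))
 *			return 1;
 *		// Punch out the second 64KiB chunk, freeing its pages
 *		// while leaving i_size untouched.
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *				 1 << 16, 1 << 16);
 *	}
 */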
3635
3636 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3637 {
3638 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3639
3640 buf->f_type = TMPFS_MAGIC;
3641 buf->f_bsize = PAGE_SIZE;
3642 buf->f_namelen = NAME_MAX;
3643 if (sbinfo->max_blocks) {
3644 buf->f_blocks = sbinfo->max_blocks;
3645 buf->f_bavail =
3646 buf->f_bfree = sbinfo->max_blocks -
3647 percpu_counter_sum(&sbinfo->used_blocks);
3648 }
3649 if (sbinfo->max_inodes) {
3650 buf->f_files = sbinfo->max_inodes;
3651 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3652 }
3653 /* else leave those fields 0 like simple_statfs */
3654
3655 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3656
3657 return 0;
3658 }
3659
3660 /*
3661 * File creation. Allocate an inode, and we're done.
3662 */
3663 static int
3664 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3665 struct dentry *dentry, umode_t mode, dev_t dev)
3666 {
3667 struct inode *inode;
3668 int error;
3669
3670 if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
3671 return -EINVAL;
3672
3673 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3674 if (IS_ERR(inode))
3675 return PTR_ERR(inode);
3676
3677 error = simple_acl_create(dir, inode);
3678 if (error)
3679 goto out_iput;
3680 error = security_inode_init_security(inode, dir, &dentry->d_name,
3681 shmem_initxattrs, NULL);
3682 if (error && error != -EOPNOTSUPP)
3683 goto out_iput;
3684
3685 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3686 if (error)
3687 goto out_iput;
3688
3689 dir->i_size += BOGO_DIRENT_SIZE;
3690 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3691 inode_inc_iversion(dir);
3692
3693 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3694 d_add(dentry, inode);
3695 else
3696 d_instantiate(dentry, inode);
3697
3698 dget(dentry); /* Extra count - pin the dentry in core */
3699 return error;
3700
3701 out_iput:
3702 iput(inode);
3703 return error;
3704 }
3705
3706 static int
3707 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3708 struct file *file, umode_t mode)
3709 {
3710 struct inode *inode;
3711 int error;
3712
3713 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3714 if (IS_ERR(inode)) {
3715 error = PTR_ERR(inode);
3716 goto err_out;
3717 }
3718 error = security_inode_init_security(inode, dir, NULL,
3719 shmem_initxattrs, NULL);
3720 if (error && error != -EOPNOTSUPP)
3721 goto out_iput;
3722 error = simple_acl_create(dir, inode);
3723 if (error)
3724 goto out_iput;
3725 d_tmpfile(file, inode);
3726
3727 err_out:
3728 return finish_open_simple(file, error);
3729 out_iput:
3730 iput(inode);
3731 return error;
3732 }
3733
3734 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3735 struct dentry *dentry, umode_t mode)
3736 {
3737 int error;
3738
3739 error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3740 if (error)
3741 return error;
3742 inc_nlink(dir);
3743 return 0;
3744 }
3745
3746 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3747 struct dentry *dentry, umode_t mode, bool excl)
3748 {
3749 return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3750 }
3751
3752 /*
3753 * Link a file.
3754 */
3755 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3756 struct dentry *dentry)
3757 {
3758 struct inode *inode = d_inode(old_dentry);
3759 int ret = 0;
3760
3761 /*
3762 * No ordinary (disk based) filesystem counts links as inodes;
3763 * but each new link needs a new dentry, pinning lowmem, and
3764 * tmpfs dentries cannot be pruned until they are unlinked.
3765 * But if an O_TMPFILE file is linked into the tmpfs, the
3766 * first link must skip that, to get the accounting right.
3767 */
3768 if (inode->i_nlink) {
3769 ret = shmem_reserve_inode(inode->i_sb, NULL);
3770 if (ret)
3771 goto out;
3772 }
3773
3774 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3775 if (ret) {
3776 if (inode->i_nlink)
3777 shmem_free_inode(inode->i_sb, 0);
3778 goto out;
3779 }
3780
3781 dir->i_size += BOGO_DIRENT_SIZE;
3782 inode_set_mtime_to_ts(dir,
3783 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3784 inode_inc_iversion(dir);
3785 inc_nlink(inode);
3786 ihold(inode); /* New dentry reference */
3787 dget(dentry); /* Extra pinning count for the created dentry */
3788 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3789 d_add(dentry, inode);
3790 else
3791 d_instantiate(dentry, inode);
3792 out:
3793 return ret;
3794 }
3795
3796 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3797 {
3798 struct inode *inode = d_inode(dentry);
3799
3800 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3801 shmem_free_inode(inode->i_sb, 0);
3802
3803 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3804
3805 dir->i_size -= BOGO_DIRENT_SIZE;
3806 inode_set_mtime_to_ts(dir,
3807 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3808 inode_inc_iversion(dir);
3809 drop_nlink(inode);
3810 dput(dentry); /* Undo the count from "create" - does all the work */
3811
3812 /*
3813 * For now, VFS can't deal with case-insensitive negative dentries, so
3814 * we invalidate them.
3815 */
3816 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3817 d_invalidate(dentry);
3818
3819 return 0;
3820 }
3821
3822 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3823 {
3824 if (!simple_offset_empty(dentry))
3825 return -ENOTEMPTY;
3826
3827 drop_nlink(d_inode(dentry));
3828 drop_nlink(dir);
3829 return shmem_unlink(dir, dentry);
3830 }
3831
3832 static int shmem_whiteout(struct mnt_idmap *idmap,
3833 struct inode *old_dir, struct dentry *old_dentry)
3834 {
3835 struct dentry *whiteout;
3836 int error;
3837
3838 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3839 if (!whiteout)
3840 return -ENOMEM;
3841
3842 error = shmem_mknod(idmap, old_dir, whiteout,
3843 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3844 dput(whiteout);
3845 if (error)
3846 return error;
3847
3848 /*
3849 * Cheat and hash the whiteout while the old dentry is still in
3850 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3851 *
3852 * d_lookup() will consistently find one of them at this point,
3853 * not sure which one, but that isn't even important.
3854 */
3855 d_rehash(whiteout);
3856 return 0;
3857 }
3858
3859 /*
3860 * The VFS layer already does all the dentry stuff for rename,
3861 * we just have to decrement the usage count for the target if
3862 * it exists so that the VFS layer correctly frees it when it
3863 * gets overwritten.
3864 */
3865 static int shmem_rename2(struct mnt_idmap *idmap,
3866 struct inode *old_dir, struct dentry *old_dentry,
3867 struct inode *new_dir, struct dentry *new_dentry,
3868 unsigned int flags)
3869 {
3870 struct inode *inode = d_inode(old_dentry);
3871 int they_are_dirs = S_ISDIR(inode->i_mode);
3872 int error;
3873
3874 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3875 return -EINVAL;
3876
3877 if (flags & RENAME_EXCHANGE)
3878 return simple_offset_rename_exchange(old_dir, old_dentry,
3879 new_dir, new_dentry);
3880
3881 if (!simple_offset_empty(new_dentry))
3882 return -ENOTEMPTY;
3883
3884 if (flags & RENAME_WHITEOUT) {
3885 error = shmem_whiteout(idmap, old_dir, old_dentry);
3886 if (error)
3887 return error;
3888 }
3889
3890 error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
3891 if (error)
3892 return error;
3893
3894 if (d_really_is_positive(new_dentry)) {
3895 (void) shmem_unlink(new_dir, new_dentry);
3896 if (they_are_dirs) {
3897 drop_nlink(d_inode(new_dentry));
3898 drop_nlink(old_dir);
3899 }
3900 } else if (they_are_dirs) {
3901 drop_nlink(old_dir);
3902 inc_nlink(new_dir);
3903 }
3904
3905 old_dir->i_size -= BOGO_DIRENT_SIZE;
3906 new_dir->i_size += BOGO_DIRENT_SIZE;
3907 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
3908 inode_inc_iversion(old_dir);
3909 inode_inc_iversion(new_dir);
3910 return 0;
3911 }
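
/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * three rename flags handled above, as issued through renameat2(2)
 * (wrapped by glibc >= 2.28).  The paths are hypothetical examples.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// Atomically swap two tmpfs files.
 *		if (renameat2(AT_FDCWD, "/dev/shm/a",
 *			      AT_FDCWD, "/dev/shm/b", RENAME_EXCHANGE))
 *			perror("exchange");
 *		// Fail with EEXIST instead of replacing an existing target.
 *		if (renameat2(AT_FDCWD, "/dev/shm/b",
 *			      AT_FDCWD, "/dev/shm/c", RENAME_NOREPLACE))
 *			perror("noreplace");
 *		return 0;
 *	}
 */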
3912
3913 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3914 struct dentry *dentry, const char *symname)
3915 {
3916 int error;
3917 int len;
3918 struct inode *inode;
3919 struct folio *folio;
3920
3921 len = strlen(symname) + 1;
3922 if (len > PAGE_SIZE)
3923 return -ENAMETOOLONG;
3924
3925 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3926 VM_NORESERVE);
3927 if (IS_ERR(inode))
3928 return PTR_ERR(inode);
3929
3930 error = security_inode_init_security(inode, dir, &dentry->d_name,
3931 shmem_initxattrs, NULL);
3932 if (error && error != -EOPNOTSUPP)
3933 goto out_iput;
3934
3935 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3936 if (error)
3937 goto out_iput;
3938
3939 inode->i_size = len-1;
3940 if (len <= SHORT_SYMLINK_LEN) {
3941 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3942 if (!inode->i_link) {
3943 error = -ENOMEM;
3944 goto out_remove_offset;
3945 }
3946 inode->i_op = &shmem_short_symlink_operations;
3947 } else {
3948 inode_nohighmem(inode);
3949 inode->i_mapping->a_ops = &shmem_aops;
3950 error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
3951 if (error)
3952 goto out_remove_offset;
3953 inode->i_op = &shmem_symlink_inode_operations;
3954 memcpy(folio_address(folio), symname, len);
3955 folio_mark_uptodate(folio);
3956 folio_mark_dirty(folio);
3957 folio_unlock(folio);
3958 folio_put(folio);
3959 }
3960 dir->i_size += BOGO_DIRENT_SIZE;
3961 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3962 inode_inc_iversion(dir);
3963 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3964 d_add(dentry, inode);
3965 else
3966 d_instantiate(dentry, inode);
3967 dget(dentry);
3968 return 0;
3969
3970 out_remove_offset:
3971 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3972 out_iput:
3973 iput(inode);
3974 return error;
3975 }
3976
3977 static void shmem_put_link(void *arg)
3978 {
3979 folio_mark_accessed(arg);
3980 folio_put(arg);
3981 }
3982
3983 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
3984 struct delayed_call *done)
3985 {
3986 struct folio *folio = NULL;
3987 int error;
3988
3989 if (!dentry) {
3990 folio = filemap_get_folio(inode->i_mapping, 0);
3991 if (IS_ERR(folio))
3992 return ERR_PTR(-ECHILD);
3993 if (PageHWPoison(folio_page(folio, 0)) ||
3994 !folio_test_uptodate(folio)) {
3995 folio_put(folio);
3996 return ERR_PTR(-ECHILD);
3997 }
3998 } else {
3999 error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
4000 if (error)
4001 return ERR_PTR(error);
4002 if (!folio)
4003 return ERR_PTR(-ECHILD);
4004 if (PageHWPoison(folio_page(folio, 0))) {
4005 folio_unlock(folio);
4006 folio_put(folio);
4007 return ERR_PTR(-ECHILD);
4008 }
4009 folio_unlock(folio);
4010 }
4011 set_delayed_call(done, shmem_put_link, folio);
4012 return folio_address(folio);
4013 }
4014
4015 #ifdef CONFIG_TMPFS_XATTR
4016
4017 static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
4018 {
4019 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4020
4021 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
4022
4023 return 0;
4024 }
4025
4026 static int shmem_fileattr_set(struct mnt_idmap *idmap,
4027 struct dentry *dentry, struct fileattr *fa)
4028 {
4029 struct inode *inode = d_inode(dentry);
4030 struct shmem_inode_info *info = SHMEM_I(inode);
4031 int ret, flags;
4032
4033 if (fileattr_has_fsx(fa))
4034 return -EOPNOTSUPP;
4035 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
4036 return -EOPNOTSUPP;
4037
4038 flags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
4039 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
4040
4041 ret = shmem_set_inode_flags(inode, flags, dentry);
4042
4043 if (ret)
4044 return ret;
4045
4046 info->fsflags = flags;
4047
4048 inode_set_ctime_current(inode);
4049 inode_inc_iversion(inode);
4050 return 0;
4051 }
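
/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * fileattr hooks above back the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls,
 * which is how chattr(1) toggles inode flags on tmpfs.  The path is a
 * hypothetical example.
 *
 *	#include <fcntl.h>
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/shm/file", O_RDONLY);	// hypothetical file
 *		int flags;
 *
 *		if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags))
 *			return 1;
 *		flags |= FS_NOATIME_FL;		// one of the user-modifiable flags
 *		return ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 */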
4052
4053 /*
4054 * Superblocks without xattr inode operations may get some security.* xattr
4055 * support from the LSM "for free". As soon as we have any other xattrs
4056 * like ACLs, we also need to implement the security.* handlers at
4057 * filesystem level, though.
4058 */
4059
4060 /*
4061 * Callback for security_inode_init_security() for acquiring xattrs.
4062 */
4063 static int shmem_initxattrs(struct inode *inode,
4064 const struct xattr *xattr_array, void *fs_info)
4065 {
4066 struct shmem_inode_info *info = SHMEM_I(inode);
4067 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4068 const struct xattr *xattr;
4069 struct simple_xattr *new_xattr;
4070 size_t ispace = 0;
4071 size_t len;
4072
4073 if (sbinfo->max_inodes) {
4074 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4075 ispace += simple_xattr_space(xattr->name,
4076 xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
4077 }
4078 if (ispace) {
4079 raw_spin_lock(&sbinfo->stat_lock);
4080 if (sbinfo->free_ispace < ispace)
4081 ispace = 0;
4082 else
4083 sbinfo->free_ispace -= ispace;
4084 raw_spin_unlock(&sbinfo->stat_lock);
4085 if (!ispace)
4086 return -ENOSPC;
4087 }
4088 }
4089
4090 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4091 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
4092 if (!new_xattr)
4093 break;
4094
4095 len = strlen(xattr->name) + 1;
4096 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
4097 GFP_KERNEL_ACCOUNT);
4098 if (!new_xattr->name) {
4099 kvfree(new_xattr);
4100 break;
4101 }
4102
4103 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
4104 XATTR_SECURITY_PREFIX_LEN);
4105 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
4106 xattr->name, len);
4107
4108 simple_xattr_add(&info->xattrs, new_xattr);
4109 }
4110
4111 if (xattr->name != NULL) {
4112 if (ispace) {
4113 raw_spin_lock(&sbinfo->stat_lock);
4114 sbinfo->free_ispace += ispace;
4115 raw_spin_unlock(&sbinfo->stat_lock);
4116 }
4117 simple_xattrs_free(&info->xattrs, NULL);
4118 return -ENOMEM;
4119 }
4120
4121 return 0;
4122 }
4123
4124 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
4125 struct dentry *unused, struct inode *inode,
4126 const char *name, void *buffer, size_t size)
4127 {
4128 struct shmem_inode_info *info = SHMEM_I(inode);
4129
4130 name = xattr_full_name(handler, name);
4131 return simple_xattr_get(&info->xattrs, name, buffer, size);
4132 }
4133
4134 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4135 struct mnt_idmap *idmap,
4136 struct dentry *unused, struct inode *inode,
4137 const char *name, const void *value,
4138 size_t size, int flags)
4139 {
4140 struct shmem_inode_info *info = SHMEM_I(inode);
4141 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4142 struct simple_xattr *old_xattr;
4143 size_t ispace = 0;
4144
4145 name = xattr_full_name(handler, name);
4146 if (value && sbinfo->max_inodes) {
4147 ispace = simple_xattr_space(name, size);
4148 raw_spin_lock(&sbinfo->stat_lock);
4149 if (sbinfo->free_ispace < ispace)
4150 ispace = 0;
4151 else
4152 sbinfo->free_ispace -= ispace;
4153 raw_spin_unlock(&sbinfo->stat_lock);
4154 if (!ispace)
4155 return -ENOSPC;
4156 }
4157
4158 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
4159 if (!IS_ERR(old_xattr)) {
4160 ispace = 0;
4161 if (old_xattr && sbinfo->max_inodes)
4162 ispace = simple_xattr_space(old_xattr->name,
4163 old_xattr->size);
4164 simple_xattr_free(old_xattr);
4165 old_xattr = NULL;
4166 inode_set_ctime_current(inode);
4167 inode_inc_iversion(inode);
4168 }
4169 if (ispace) {
4170 raw_spin_lock(&sbinfo->stat_lock);
4171 sbinfo->free_ispace += ispace;
4172 raw_spin_unlock(&sbinfo->stat_lock);
4173 }
4174 return PTR_ERR(old_xattr);
4175 }
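
/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * handlers above serve the user.*, trusted.* and security.* namespaces,
 * with the value charged against free_ispace when nr_inodes is limited.
 * The path and attribute name are hypothetical examples.
 *
 *	#include <stdio.h>
 *	#include <sys/xattr.h>
 *
 *	int main(void)
 *	{
 *		const char *path = "/dev/shm/file";	// hypothetical file
 *		char buf[64];
 *		ssize_t n;
 *
 *		if (setxattr(path, "user.origin", "example", 7, 0))
 *			return 1;
 *		n = getxattr(path, "user.origin", buf, sizeof(buf));
 *		if (n > 0)
 *			printf("user.origin = %.*s\n", (int)n, buf);
 *		return 0;
 *	}
 */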
4176
4177 static const struct xattr_handler shmem_security_xattr_handler = {
4178 .prefix = XATTR_SECURITY_PREFIX,
4179 .get = shmem_xattr_handler_get,
4180 .set = shmem_xattr_handler_set,
4181 };
4182
4183 static const struct xattr_handler shmem_trusted_xattr_handler = {
4184 .prefix = XATTR_TRUSTED_PREFIX,
4185 .get = shmem_xattr_handler_get,
4186 .set = shmem_xattr_handler_set,
4187 };
4188
4189 static const struct xattr_handler shmem_user_xattr_handler = {
4190 .prefix = XATTR_USER_PREFIX,
4191 .get = shmem_xattr_handler_get,
4192 .set = shmem_xattr_handler_set,
4193 };
4194
4195 static const struct xattr_handler * const shmem_xattr_handlers[] = {
4196 &shmem_security_xattr_handler,
4197 &shmem_trusted_xattr_handler,
4198 &shmem_user_xattr_handler,
4199 NULL
4200 };
4201
4202 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4203 {
4204 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4205 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
4206 }
4207 #endif /* CONFIG_TMPFS_XATTR */
4208
4209 static const struct inode_operations shmem_short_symlink_operations = {
4210 .getattr = shmem_getattr,
4211 .setattr = shmem_setattr,
4212 .get_link = simple_get_link,
4213 #ifdef CONFIG_TMPFS_XATTR
4214 .listxattr = shmem_listxattr,
4215 #endif
4216 };
4217
4218 static const struct inode_operations shmem_symlink_inode_operations = {
4219 .getattr = shmem_getattr,
4220 .setattr = shmem_setattr,
4221 .get_link = shmem_get_link,
4222 #ifdef CONFIG_TMPFS_XATTR
4223 .listxattr = shmem_listxattr,
4224 #endif
4225 };
4226
4227 static struct dentry *shmem_get_parent(struct dentry *child)
4228 {
4229 return ERR_PTR(-ESTALE);
4230 }
4231
4232 static int shmem_match(struct inode *ino, void *vfh)
4233 {
4234 __u32 *fh = vfh;
4235 __u64 inum = fh[2];
4236 inum = (inum << 32) | fh[1];
4237 return ino->i_ino == inum && fh[0] == ino->i_generation;
4238 }
4239
4240 /* Find any alias of inode, but prefer a hashed alias */
4241 static struct dentry *shmem_find_alias(struct inode *inode)
4242 {
4243 struct dentry *alias = d_find_alias(inode);
4244
4245 return alias ?: d_find_any_alias(inode);
4246 }
4247
4248 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4249 struct fid *fid, int fh_len, int fh_type)
4250 {
4251 struct inode *inode;
4252 struct dentry *dentry = NULL;
4253 u64 inum;
4254
4255 if (fh_len < 3)
4256 return NULL;
4257
4258 inum = fid->raw[2];
4259 inum = (inum << 32) | fid->raw[1];
4260
4261 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4262 shmem_match, fid->raw);
4263 if (inode) {
4264 dentry = shmem_find_alias(inode);
4265 iput(inode);
4266 }
4267
4268 return dentry;
4269 }
4270
4271 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4272 struct inode *parent)
4273 {
4274 if (*len < 3) {
4275 *len = 3;
4276 return FILEID_INVALID;
4277 }
4278
4279 if (inode_unhashed(inode)) {
4280 /* Unfortunately insert_inode_hash is not idempotent,
4281 * so as we hash inodes here rather than at creation
4282 * time, we need a lock to ensure we only try
4283 * to do it once
4284 */
4285 static DEFINE_SPINLOCK(lock);
4286 spin_lock(&lock);
4287 if (inode_unhashed(inode))
4288 __insert_inode_hash(inode,
4289 inode->i_ino + inode->i_generation);
4290 spin_unlock(&lock);
4291 }
4292
4293 fh[0] = inode->i_generation;
4294 fh[1] = inode->i_ino;
4295 fh[2] = ((__u64)inode->i_ino) >> 32;
4296
4297 *len = 3;
4298 return 1;
4299 }
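
/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * export ops above encode a 3-word handle (generation plus a 64-bit
 * inode number), which userspace can obtain with name_to_handle_at(2)
 * and later redeem with open_by_handle_at(2) given CAP_DAC_READ_SEARCH.
 * The path is a hypothetical example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		struct file_handle *fh;
 *		int mount_id;
 *
 *		fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
 *		if (!fh)
 *			return 1;
 *		fh->handle_bytes = MAX_HANDLE_SZ;
 *		if (name_to_handle_at(AT_FDCWD, "/dev/shm/file", fh,
 *				      &mount_id, 0) == 0)
 *			printf("handle: %u bytes, type %d\n",
 *			       fh->handle_bytes, fh->handle_type);
 *		free(fh);
 *		return 0;
 *	}
 */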
4300
4301 static const struct export_operations shmem_export_ops = {
4302 .get_parent = shmem_get_parent,
4303 .encode_fh = shmem_encode_fh,
4304 .fh_to_dentry = shmem_fh_to_dentry,
4305 };
4306
4307 enum shmem_param {
4308 Opt_gid,
4309 Opt_huge,
4310 Opt_mode,
4311 Opt_mpol,
4312 Opt_nr_blocks,
4313 Opt_nr_inodes,
4314 Opt_size,
4315 Opt_uid,
4316 Opt_inode32,
4317 Opt_inode64,
4318 Opt_noswap,
4319 Opt_quota,
4320 Opt_usrquota,
4321 Opt_grpquota,
4322 Opt_usrquota_block_hardlimit,
4323 Opt_usrquota_inode_hardlimit,
4324 Opt_grpquota_block_hardlimit,
4325 Opt_grpquota_inode_hardlimit,
4326 Opt_casefold_version,
4327 Opt_casefold,
4328 Opt_strict_encoding,
4329 };
4330
4331 static const struct constant_table shmem_param_enums_huge[] = {
4332 {"never", SHMEM_HUGE_NEVER },
4333 {"always", SHMEM_HUGE_ALWAYS },
4334 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
4335 {"advise", SHMEM_HUGE_ADVISE },
4336 {}
4337 };
4338
4339 const struct fs_parameter_spec shmem_fs_parameters[] = {
4340 fsparam_gid ("gid", Opt_gid),
4341 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
4342 fsparam_u32oct("mode", Opt_mode),
4343 fsparam_string("mpol", Opt_mpol),
4344 fsparam_string("nr_blocks", Opt_nr_blocks),
4345 fsparam_string("nr_inodes", Opt_nr_inodes),
4346 fsparam_string("size", Opt_size),
4347 fsparam_uid ("uid", Opt_uid),
4348 fsparam_flag ("inode32", Opt_inode32),
4349 fsparam_flag ("inode64", Opt_inode64),
4350 fsparam_flag ("noswap", Opt_noswap),
4351 #ifdef CONFIG_TMPFS_QUOTA
4352 fsparam_flag ("quota", Opt_quota),
4353 fsparam_flag ("usrquota", Opt_usrquota),
4354 fsparam_flag ("grpquota", Opt_grpquota),
4355 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4356 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4357 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4358 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4359 #endif
4360 fsparam_string("casefold", Opt_casefold_version),
4361 fsparam_flag ("casefold", Opt_casefold),
4362 fsparam_flag ("strict_encoding", Opt_strict_encoding),
4363 {}
4364 };
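
/*
 * Illustrative sketch (not part of the kernel source): a few of the
 * parameters above combined in a mount(2) call; the same option string
 * works with "mount -t tmpfs -o ...".  The target directory is a
 * hypothetical example.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// Half of RAM, 10240 inodes, sticky world-writable root,
 *		// huge pages only within i_size.
 *		if (mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
 *			  "size=50%,nr_inodes=10240,mode=1777,huge=within_size"))
 *			perror("mount");
 *		return 0;
 *	}
 */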
4365
4366 #if IS_ENABLED(CONFIG_UNICODE)
4367 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4368 bool latest_version)
4369 {
4370 struct shmem_options *ctx = fc->fs_private;
4371 int version = UTF8_LATEST;
4372 struct unicode_map *encoding;
4373 char *version_str = param->string + 5;
4374
4375 if (!latest_version) {
4376 if (strncmp(param->string, "utf8-", 5))
4377 return invalfc(fc, "Only UTF-8 encodings are supported "
4378 "in the format: utf8-<version number>");
4379
4380 version = utf8_parse_version(version_str);
4381 if (version < 0)
4382 return invalfc(fc, "Invalid UTF-8 version: %s", version_str);
4383 }
4384
4385 encoding = utf8_load(version);
4386
4387 if (IS_ERR(encoding)) {
4388 return invalfc(fc, "Failed loading UTF-8 version: utf8-%u.%u.%u\n",
4389 unicode_major(version), unicode_minor(version),
4390 unicode_rev(version));
4391 }
4392
4393 pr_info("tmpfs: Using encoding : utf8-%u.%u.%u\n",
4394 unicode_major(version), unicode_minor(version), unicode_rev(version));
4395
4396 ctx->encoding = encoding;
4397
4398 return 0;
4399 }
4400 #else
4401 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4402 bool latest_version)
4403 {
4404 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4405 }
4406 #endif
4407
4408 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4409 {
4410 struct shmem_options *ctx = fc->fs_private;
4411 struct fs_parse_result result;
4412 unsigned long long size;
4413 char *rest;
4414 int opt;
4415 kuid_t kuid;
4416 kgid_t kgid;
4417
4418 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4419 if (opt < 0)
4420 return opt;
4421
4422 switch (opt) {
4423 case Opt_size:
4424 size = memparse(param->string, &rest);
4425 if (*rest == '%') {
4426 size <<= PAGE_SHIFT;
4427 size *= totalram_pages();
4428 do_div(size, 100);
4429 rest++;
4430 }
4431 if (*rest)
4432 goto bad_value;
4433 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4434 ctx->seen |= SHMEM_SEEN_BLOCKS;
4435 break;
4436 case Opt_nr_blocks:
4437 ctx->blocks = memparse(param->string, &rest);
4438 if (*rest || ctx->blocks > LONG_MAX)
4439 goto bad_value;
4440 ctx->seen |= SHMEM_SEEN_BLOCKS;
4441 break;
4442 case Opt_nr_inodes:
4443 ctx->inodes = memparse(param->string, &rest);
4444 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4445 goto bad_value;
4446 ctx->seen |= SHMEM_SEEN_INODES;
4447 break;
4448 case Opt_mode:
4449 ctx->mode = result.uint_32 & 07777;
4450 break;
4451 case Opt_uid:
4452 kuid = result.uid;
4453
4454 /*
4455 * The requested uid must be representable in the
4456 * filesystem's idmapping.
4457 */
4458 if (!kuid_has_mapping(fc->user_ns, kuid))
4459 goto bad_value;
4460
4461 ctx->uid = kuid;
4462 break;
4463 case Opt_gid:
4464 kgid = result.gid;
4465
4466 /*
4467 * The requested gid must be representable in the
4468 * filesystem's idmapping.
4469 */
4470 if (!kgid_has_mapping(fc->user_ns, kgid))
4471 goto bad_value;
4472
4473 ctx->gid = kgid;
4474 break;
4475 case Opt_huge:
4476 ctx->huge = result.uint_32;
4477 if (ctx->huge != SHMEM_HUGE_NEVER &&
4478 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4479 has_transparent_hugepage()))
4480 goto unsupported_parameter;
4481 ctx->seen |= SHMEM_SEEN_HUGE;
4482 break;
4483 case Opt_mpol:
4484 if (IS_ENABLED(CONFIG_NUMA)) {
4485 mpol_put(ctx->mpol);
4486 ctx->mpol = NULL;
4487 if (mpol_parse_str(param->string, &ctx->mpol))
4488 goto bad_value;
4489 break;
4490 }
4491 goto unsupported_parameter;
4492 case Opt_inode32:
4493 ctx->full_inums = false;
4494 ctx->seen |= SHMEM_SEEN_INUMS;
4495 break;
4496 case Opt_inode64:
4497 if (sizeof(ino_t) < 8) {
4498 return invalfc(fc,
4499 "Cannot use inode64 with <64bit inums in kernel\n");
4500 }
4501 ctx->full_inums = true;
4502 ctx->seen |= SHMEM_SEEN_INUMS;
4503 break;
4504 case Opt_noswap:
4505 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4506 return invalfc(fc,
4507 "Turning off swap in unprivileged tmpfs mounts unsupported");
4508 }
4509 ctx->noswap = true;
4510 ctx->seen |= SHMEM_SEEN_NOSWAP;
4511 break;
4512 case Opt_quota:
4513 if (fc->user_ns != &init_user_ns)
4514 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4515 ctx->seen |= SHMEM_SEEN_QUOTA;
4516 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4517 break;
4518 case Opt_usrquota:
4519 if (fc->user_ns != &init_user_ns)
4520 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4521 ctx->seen |= SHMEM_SEEN_QUOTA;
4522 ctx->quota_types |= QTYPE_MASK_USR;
4523 break;
4524 case Opt_grpquota:
4525 if (fc->user_ns != &init_user_ns)
4526 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4527 ctx->seen |= SHMEM_SEEN_QUOTA;
4528 ctx->quota_types |= QTYPE_MASK_GRP;
4529 break;
4530 case Opt_usrquota_block_hardlimit:
4531 size = memparse(param->string, &rest);
4532 if (*rest || !size)
4533 goto bad_value;
4534 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4535 return invalfc(fc,
4536 "User quota block hardlimit too large.");
4537 ctx->qlimits.usrquota_bhardlimit = size;
4538 break;
4539 case Opt_grpquota_block_hardlimit:
4540 size = memparse(param->string, &rest);
4541 if (*rest || !size)
4542 goto bad_value;
4543 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4544 return invalfc(fc,
4545 "Group quota block hardlimit too large.");
4546 ctx->qlimits.grpquota_bhardlimit = size;
4547 break;
4548 case Opt_usrquota_inode_hardlimit:
4549 size = memparse(param->string, &rest);
4550 if (*rest || !size)
4551 goto bad_value;
4552 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4553 return invalfc(fc,
4554 "User quota inode hardlimit too large.");
4555 ctx->qlimits.usrquota_ihardlimit = size;
4556 break;
4557 case Opt_grpquota_inode_hardlimit:
4558 size = memparse(param->string, &rest);
4559 if (*rest || !size)
4560 goto bad_value;
4561 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4562 return invalfc(fc,
4563 "Group quota inode hardlimit too large.");
4564 ctx->qlimits.grpquota_ihardlimit = size;
4565 break;
4566 case Opt_casefold_version:
4567 return shmem_parse_opt_casefold(fc, param, false);
4568 case Opt_casefold:
4569 return shmem_parse_opt_casefold(fc, param, true);
4570 case Opt_strict_encoding:
4571 #if IS_ENABLED(CONFIG_UNICODE)
4572 ctx->strict_encoding = true;
4573 break;
4574 #else
4575 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4576 #endif
4577 }
4578 return 0;
4579
4580 unsupported_parameter:
4581 return invalfc(fc, "Unsupported parameter '%s'", param->key);
4582 bad_value:
4583 return invalfc(fc, "Bad value for '%s'", param->key);
4584 }
4585
4586 static int shmem_parse_options(struct fs_context *fc, void *data)
4587 {
4588 char *options = data;
4589
4590 if (options) {
4591 int err = security_sb_eat_lsm_opts(options, &fc->security);
4592 if (err)
4593 return err;
4594 }
4595
4596 while (options != NULL) {
4597 char *this_char = options;
4598 for (;;) {
4599 /*
4600 * NUL-terminate this option: unfortunately,
4601 * mount options form a comma-separated list,
4602 * but mpol's nodelist may also contain commas.
4603 */
4604 options = strchr(options, ',');
4605 if (options == NULL)
4606 break;
4607 options++;
4608 if (!isdigit(*options)) {
4609 options[-1] = '\0';
4610 break;
4611 }
4612 }
4613 if (*this_char) {
4614 char *value = strchr(this_char, '=');
4615 size_t len = 0;
4616 int err;
4617
4618 if (value) {
4619 *value++ = '\0';
4620 len = strlen(value);
4621 }
4622 err = vfs_parse_fs_string(fc, this_char, value, len);
4623 if (err < 0)
4624 return err;
4625 }
4626 }
4627 return 0;
4628 }
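
/*
 * Worked example for the scan above (illustrative, not from the source):
 * given data = "size=1g,mpol=bind:1,3,mode=700", the comma after "1" is
 * followed by the digit "3", so it is left in place; the commas before
 * "mpol" and "mode" are NUL-ed.  The options handed to fs_parse() are
 * therefore "size=1g", "mpol=bind:1,3" and "mode=700".
 */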
4629
4630 /*
4631 * Reconfigure a shmem filesystem.
4632 */
4633 static int shmem_reconfigure(struct fs_context *fc)
4634 {
4635 struct shmem_options *ctx = fc->fs_private;
4636 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4637 unsigned long used_isp;
4638 struct mempolicy *mpol = NULL;
4639 const char *err;
4640
4641 raw_spin_lock(&sbinfo->stat_lock);
4642 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4643
4644 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4645 if (!sbinfo->max_blocks) {
4646 err = "Cannot retroactively limit size";
4647 goto out;
4648 }
4649 if (percpu_counter_compare(&sbinfo->used_blocks,
4650 ctx->blocks) > 0) {
4651 err = "Too small a size for current use";
4652 goto out;
4653 }
4654 }
4655 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4656 if (!sbinfo->max_inodes) {
4657 err = "Cannot retroactively limit inodes";
4658 goto out;
4659 }
4660 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4661 err = "Too few inodes for current use";
4662 goto out;
4663 }
4664 }
4665
4666 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4667 sbinfo->next_ino > UINT_MAX) {
4668 err = "Current inum too high to switch to 32-bit inums";
4669 goto out;
4670 }
4671 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4672 err = "Cannot disable swap on remount";
4673 goto out;
4674 }
4675 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4676 err = "Cannot enable swap on remount if it was disabled on first mount";
4677 goto out;
4678 }
4679
4680 if (ctx->seen & SHMEM_SEEN_QUOTA &&
4681 !sb_any_quota_loaded(fc->root->d_sb)) {
4682 err = "Cannot enable quota on remount";
4683 goto out;
4684 }
4685
4686 #ifdef CONFIG_TMPFS_QUOTA
4687 #define CHANGED_LIMIT(name) \
4688 (ctx->qlimits.name## hardlimit && \
4689 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4690
4691 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4692 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4693 err = "Cannot change global quota limit on remount";
4694 goto out;
4695 }
4696 #endif /* CONFIG_TMPFS_QUOTA */
4697
4698 if (ctx->seen & SHMEM_SEEN_HUGE)
4699 sbinfo->huge = ctx->huge;
4700 if (ctx->seen & SHMEM_SEEN_INUMS)
4701 sbinfo->full_inums = ctx->full_inums;
4702 if (ctx->seen & SHMEM_SEEN_BLOCKS)
4703 sbinfo->max_blocks = ctx->blocks;
4704 if (ctx->seen & SHMEM_SEEN_INODES) {
4705 sbinfo->max_inodes = ctx->inodes;
4706 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4707 }
4708
4709 /*
4710 * Preserve previous mempolicy unless mpol remount option was specified.
4711 */
4712 if (ctx->mpol) {
4713 mpol = sbinfo->mpol;
4714 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
4715 ctx->mpol = NULL;
4716 }
4717
4718 if (ctx->noswap)
4719 sbinfo->noswap = true;
4720
4721 raw_spin_unlock(&sbinfo->stat_lock);
4722 mpol_put(mpol);
4723 return 0;
4724 out:
4725 raw_spin_unlock(&sbinfo->stat_lock);
4726 return invalfc(fc, "%s", err);
4727 }
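
/*
 * Illustrative userspace sketch (not part of the kernel source): growing
 * a live tmpfs instance, which lands in shmem_reconfigure() above.  The
 * target path is a hypothetical example.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// Shrinking below current usage would fail with EINVAL
 *		// ("Too small a size for current use" in the fc log).
 *		if (mount(NULL, "/mnt/scratch", NULL, MS_REMOUNT, "size=2g"))
 *			perror("remount");
 *		return 0;
 *	}
 */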
4728
4729 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4730 {
4731 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4732 struct mempolicy *mpol;
4733
4734 if (sbinfo->max_blocks != shmem_default_max_blocks())
4735 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4736 if (sbinfo->max_inodes != shmem_default_max_inodes())
4737 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4738 if (sbinfo->mode != (0777 | S_ISVTX))
4739 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4740 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4741 seq_printf(seq, ",uid=%u",
4742 from_kuid_munged(&init_user_ns, sbinfo->uid));
4743 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4744 seq_printf(seq, ",gid=%u",
4745 from_kgid_munged(&init_user_ns, sbinfo->gid));
4746
4747 /*
4748 * Showing inode{64,32} might be useful even if it's the system default,
4749 * since then people don't have to resort to checking both here and
4750 * /proc/config.gz to confirm 64-bit inums were successfully applied
4751 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4752 *
4753 * We hide it when inode64 isn't the default and we are using 32-bit
4754 * inodes, since that probably just means the feature isn't even under
4755 * consideration.
4756 *
4757 * As such:
4758 *
4759 *                    +-----------------+-----------------+
4760 *                    | TMPFS_INODE64=y | TMPFS_INODE64=n |
4761 * +------------------+-----------------+-----------------+
4762 * | full_inums=true  | show            | show            |
4763 * | full_inums=false | show            | hide            |
4764 * +------------------+-----------------+-----------------+
4765 *
4766 */
4767 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4768 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4769 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4770 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4771 if (sbinfo->huge)
4772 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4773 #endif
4774 mpol = shmem_get_sbmpol(sbinfo);
4775 shmem_show_mpol(seq, mpol);
4776 mpol_put(mpol);
4777 if (sbinfo->noswap)
4778 seq_printf(seq, ",noswap");
4779 #ifdef CONFIG_TMPFS_QUOTA
4780 if (sb_has_quota_active(root->d_sb, USRQUOTA))
4781 seq_printf(seq, ",usrquota");
4782 if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4783 seq_printf(seq, ",grpquota");
4784 if (sbinfo->qlimits.usrquota_bhardlimit)
4785 seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4786 sbinfo->qlimits.usrquota_bhardlimit);
4787 if (sbinfo->qlimits.grpquota_bhardlimit)
4788 seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4789 sbinfo->qlimits.grpquota_bhardlimit);
4790 if (sbinfo->qlimits.usrquota_ihardlimit)
4791 seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4792 sbinfo->qlimits.usrquota_ihardlimit);
4793 if (sbinfo->qlimits.grpquota_ihardlimit)
4794 seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4795 sbinfo->qlimits.grpquota_ihardlimit);
4796 #endif
4797 return 0;
4798 }
4799
4800 #endif /* CONFIG_TMPFS */
4801
4802 static void shmem_put_super(struct super_block *sb)
4803 {
4804 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4805
4806 #if IS_ENABLED(CONFIG_UNICODE)
4807 if (sb->s_encoding)
4808 utf8_unload(sb->s_encoding);
4809 #endif
4810
4811 #ifdef CONFIG_TMPFS_QUOTA
4812 shmem_disable_quotas(sb);
4813 #endif
4814 free_percpu(sbinfo->ino_batch);
4815 percpu_counter_destroy(&sbinfo->used_blocks);
4816 mpol_put(sbinfo->mpol);
4817 kfree(sbinfo);
4818 sb->s_fs_info = NULL;
4819 }
4820
4821 #if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_TMPFS)
4822 static const struct dentry_operations shmem_ci_dentry_ops = {
4823 .d_hash = generic_ci_d_hash,
4824 .d_compare = generic_ci_d_compare,
4825 .d_delete = always_delete_dentry,
4826 };
4827 #endif
4828
4829 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4830 {
4831 struct shmem_options *ctx = fc->fs_private;
4832 struct inode *inode;
4833 struct shmem_sb_info *sbinfo;
4834 int error = -ENOMEM;
4835
4836 /* Round up to L1_CACHE_BYTES to resist false sharing */
4837 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4838 L1_CACHE_BYTES), GFP_KERNEL);
4839 if (!sbinfo)
4840 return error;
4841
4842 sb->s_fs_info = sbinfo;
4843
4844 #ifdef CONFIG_TMPFS
4845 /*
4846 * By default we only allow half of the physical RAM per
4847 * tmpfs instance, limiting inodes to one per page of lowmem;
4848 * but the internal instance is left unlimited.
4849 */
4850 if (!(sb->s_flags & SB_KERNMOUNT)) {
4851 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4852 ctx->blocks = shmem_default_max_blocks();
4853 if (!(ctx->seen & SHMEM_SEEN_INODES))
4854 ctx->inodes = shmem_default_max_inodes();
4855 if (!(ctx->seen & SHMEM_SEEN_INUMS))
4856 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
4857 sbinfo->noswap = ctx->noswap;
4858 } else {
4859 sb->s_flags |= SB_NOUSER;
4860 }
4861 sb->s_export_op = &shmem_export_ops;
4862 sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4863
4864 #if IS_ENABLED(CONFIG_UNICODE)
4865 if (!ctx->encoding && ctx->strict_encoding) {
4866 pr_err("tmpfs: strict_encoding option without encoding is forbidden\n");
4867 error = -EINVAL;
4868 goto failed;
4869 }
4870
4871 if (ctx->encoding) {
4872 sb->s_encoding = ctx->encoding;
4873 sb->s_d_op = &shmem_ci_dentry_ops;
4874 if (ctx->strict_encoding)
4875 sb->s_encoding_flags = SB_ENC_STRICT_MODE_FL;
4876 }
4877 #endif
4878
4879 #else
4880 sb->s_flags |= SB_NOUSER;
4881 #endif /* CONFIG_TMPFS */
4882 sbinfo->max_blocks = ctx->blocks;
4883 sbinfo->max_inodes = ctx->inodes;
4884 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
4885 if (sb->s_flags & SB_KERNMOUNT) {
4886 sbinfo->ino_batch = alloc_percpu(ino_t);
4887 if (!sbinfo->ino_batch)
4888 goto failed;
4889 }
4890 sbinfo->uid = ctx->uid;
4891 sbinfo->gid = ctx->gid;
4892 sbinfo->full_inums = ctx->full_inums;
4893 sbinfo->mode = ctx->mode;
4894 sbinfo->huge = ctx->huge;
4895 sbinfo->mpol = ctx->mpol;
4896 ctx->mpol = NULL;
4897
4898 raw_spin_lock_init(&sbinfo->stat_lock);
4899 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4900 goto failed;
4901 spin_lock_init(&sbinfo->shrinklist_lock);
4902 INIT_LIST_HEAD(&sbinfo->shrinklist);
4903
4904 sb->s_maxbytes = MAX_LFS_FILESIZE;
4905 sb->s_blocksize = PAGE_SIZE;
4906 sb->s_blocksize_bits = PAGE_SHIFT;
4907 sb->s_magic = TMPFS_MAGIC;
4908 sb->s_op = &shmem_ops;
4909 sb->s_time_gran = 1;
4910 #ifdef CONFIG_TMPFS_XATTR
4911 sb->s_xattr = shmem_xattr_handlers;
4912 #endif
4913 #ifdef CONFIG_TMPFS_POSIX_ACL
4914 sb->s_flags |= SB_POSIXACL;
4915 #endif
4916 uuid_t uuid;
4917 uuid_gen(&uuid);
4918 super_set_uuid(sb, uuid.b, sizeof(uuid));
4919
4920 #ifdef CONFIG_TMPFS_QUOTA
4921 if (ctx->seen & SHMEM_SEEN_QUOTA) {
4922 sb->dq_op = &shmem_quota_operations;
4923 sb->s_qcop = &dquot_quotactl_sysfile_ops;
4924 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4925
4926 /* Copy the default limits from ctx into sbinfo */
4927 memcpy(&sbinfo->qlimits, &ctx->qlimits,
4928 sizeof(struct shmem_quota_limits));
4929
4930 if (shmem_enable_quotas(sb, ctx->quota_types))
4931 goto failed;
4932 }
4933 #endif /* CONFIG_TMPFS_QUOTA */
4934
4935 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
4936 S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
4937 if (IS_ERR(inode)) {
4938 error = PTR_ERR(inode);
4939 goto failed;
4940 }
4941 inode->i_uid = sbinfo->uid;
4942 inode->i_gid = sbinfo->gid;
4943 sb->s_root = d_make_root(inode);
4944 if (!sb->s_root)
4945 goto failed;
4946 return 0;
4947
4948 failed:
4949 shmem_put_super(sb);
4950 return error;
4951 }
4952
4953 static int shmem_get_tree(struct fs_context *fc)
4954 {
4955 return get_tree_nodev(fc, shmem_fill_super);
4956 }
4957
4958 static void shmem_free_fc(struct fs_context *fc)
4959 {
4960 struct shmem_options *ctx = fc->fs_private;
4961
4962 if (ctx) {
4963 mpol_put(ctx->mpol);
4964 kfree(ctx);
4965 }
4966 }
4967
4968 static const struct fs_context_operations shmem_fs_context_ops = {
4969 .free = shmem_free_fc,
4970 .get_tree = shmem_get_tree,
4971 #ifdef CONFIG_TMPFS
4972 .parse_monolithic = shmem_parse_options,
4973 .parse_param = shmem_parse_one,
4974 .reconfigure = shmem_reconfigure,
4975 #endif
4976 };
4977
4978 static struct kmem_cache *shmem_inode_cachep __ro_after_init;
4979
4980 static struct inode *shmem_alloc_inode(struct super_block *sb)
4981 {
4982 struct shmem_inode_info *info;
4983 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
4984 if (!info)
4985 return NULL;
4986 return &info->vfs_inode;
4987 }
4988
4989 static void shmem_free_in_core_inode(struct inode *inode)
4990 {
4991 if (S_ISLNK(inode->i_mode))
4992 kfree(inode->i_link);
4993 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4994 }
4995
4996 static void shmem_destroy_inode(struct inode *inode)
4997 {
4998 if (S_ISREG(inode->i_mode))
4999 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
5000 if (S_ISDIR(inode->i_mode))
5001 simple_offset_destroy(shmem_get_offset_ctx(inode));
5002 }
5003
5004 static void shmem_init_inode(void *foo)
5005 {
5006 struct shmem_inode_info *info = foo;
5007 inode_init_once(&info->vfs_inode);
5008 }
5009
5010 static void __init shmem_init_inodecache(void)
5011 {
5012 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
5013 sizeof(struct shmem_inode_info),
5014 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
5015 }
5016
5017 static void __init shmem_destroy_inodecache(void)
5018 {
5019 kmem_cache_destroy(shmem_inode_cachep);
5020 }
5021
5022 /* Keep the page in page cache instead of truncating it */
5023 static int shmem_error_remove_folio(struct address_space *mapping,
5024 struct folio *folio)
5025 {
5026 return 0;
5027 }
5028
5029 static const struct address_space_operations shmem_aops = {
5030 .writepage = shmem_writepage,
5031 .dirty_folio = noop_dirty_folio,
5032 #ifdef CONFIG_TMPFS
5033 .write_begin = shmem_write_begin,
5034 .write_end = shmem_write_end,
5035 #endif
5036 #ifdef CONFIG_MIGRATION
5037 .migrate_folio = migrate_folio,
5038 #endif
5039 .error_remove_folio = shmem_error_remove_folio,
5040 };
5041
5042 static const struct file_operations shmem_file_operations = {
5043 .mmap = shmem_mmap,
5044 .open = shmem_file_open,
5045 .get_unmapped_area = shmem_get_unmapped_area,
5046 #ifdef CONFIG_TMPFS
5047 .llseek = shmem_file_llseek,
5048 .read_iter = shmem_file_read_iter,
5049 .write_iter = shmem_file_write_iter,
5050 .fsync = noop_fsync,
5051 .splice_read = shmem_file_splice_read,
5052 .splice_write = iter_file_splice_write,
5053 .fallocate = shmem_fallocate,
5054 #endif
5055 };
5056
5057 static const struct inode_operations shmem_inode_operations = {
5058 .getattr = shmem_getattr,
5059 .setattr = shmem_setattr,
5060 #ifdef CONFIG_TMPFS_XATTR
5061 .listxattr = shmem_listxattr,
5062 .set_acl = simple_set_acl,
5063 .fileattr_get = shmem_fileattr_get,
5064 .fileattr_set = shmem_fileattr_set,
5065 #endif
5066 };
5067
5068 static const struct inode_operations shmem_dir_inode_operations = {
5069 #ifdef CONFIG_TMPFS
5070 .getattr = shmem_getattr,
5071 .create = shmem_create,
5072 .lookup = simple_lookup,
5073 .link = shmem_link,
5074 .unlink = shmem_unlink,
5075 .symlink = shmem_symlink,
5076 .mkdir = shmem_mkdir,
5077 .rmdir = shmem_rmdir,
5078 .mknod = shmem_mknod,
5079 .rename = shmem_rename2,
5080 .tmpfile = shmem_tmpfile,
5081 .get_offset_ctx = shmem_get_offset_ctx,
5082 #endif
5083 #ifdef CONFIG_TMPFS_XATTR
5084 .listxattr = shmem_listxattr,
5085 .fileattr_get = shmem_fileattr_get,
5086 .fileattr_set = shmem_fileattr_set,
5087 #endif
5088 #ifdef CONFIG_TMPFS_POSIX_ACL
5089 .setattr = shmem_setattr,
5090 .set_acl = simple_set_acl,
5091 #endif
5092 };
5093
5094 static const struct inode_operations shmem_special_inode_operations = {
5095 .getattr = shmem_getattr,
5096 #ifdef CONFIG_TMPFS_XATTR
5097 .listxattr = shmem_listxattr,
5098 #endif
5099 #ifdef CONFIG_TMPFS_POSIX_ACL
5100 .setattr = shmem_setattr,
5101 .set_acl = simple_set_acl,
5102 #endif
5103 };
5104
5105 static const struct super_operations shmem_ops = {
5106 .alloc_inode = shmem_alloc_inode,
5107 .free_inode = shmem_free_in_core_inode,
5108 .destroy_inode = shmem_destroy_inode,
5109 #ifdef CONFIG_TMPFS
5110 .statfs = shmem_statfs,
5111 .show_options = shmem_show_options,
5112 #endif
5113 #ifdef CONFIG_TMPFS_QUOTA
5114 .get_dquots = shmem_get_dquots,
5115 #endif
5116 .evict_inode = shmem_evict_inode,
5117 .drop_inode = generic_delete_inode,
5118 .put_super = shmem_put_super,
5119 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5120 .nr_cached_objects = shmem_unused_huge_count,
5121 .free_cached_objects = shmem_unused_huge_scan,
5122 #endif
5123 };
5124
5125 static const struct vm_operations_struct shmem_vm_ops = {
5126 .fault = shmem_fault,
5127 .map_pages = filemap_map_pages,
5128 #ifdef CONFIG_NUMA
5129 .set_policy = shmem_set_policy,
5130 .get_policy = shmem_get_policy,
5131 #endif
5132 };
5133
5134 static const struct vm_operations_struct shmem_anon_vm_ops = {
5135 .fault = shmem_fault,
5136 .map_pages = filemap_map_pages,
5137 #ifdef CONFIG_NUMA
5138 .set_policy = shmem_set_policy,
5139 .get_policy = shmem_get_policy,
5140 #endif
5141 };
5142
5143 int shmem_init_fs_context(struct fs_context *fc)
5144 {
5145 struct shmem_options *ctx;
5146
5147 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
5148 if (!ctx)
5149 return -ENOMEM;
5150
5151 ctx->mode = 0777 | S_ISVTX;
5152 ctx->uid = current_fsuid();
5153 ctx->gid = current_fsgid();
5154
5155 #if IS_ENABLED(CONFIG_UNICODE)
5156 ctx->encoding = NULL;
5157 #endif
5158
5159 fc->fs_private = ctx;
5160 fc->ops = &shmem_fs_context_ops;
5161 return 0;
5162 }
5163
5164 static struct file_system_type shmem_fs_type = {
5165 .owner = THIS_MODULE,
5166 .name = "tmpfs",
5167 .init_fs_context = shmem_init_fs_context,
5168 #ifdef CONFIG_TMPFS
5169 .parameters = shmem_fs_parameters,
5170 #endif
5171 .kill_sb = kill_litter_super,
5172 .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
5173 };
5174
5175 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5176
5177 #define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \
5178 { \
5179 .attr = { .name = __stringify(_name), .mode = _mode }, \
5180 .show = _show, \
5181 .store = _store, \
5182 }
5183
5184 #define TMPFS_ATTR_W(_name, _store) \
5185 static struct kobj_attribute tmpfs_attr_##_name = \
5186 __INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
5187
5188 #define TMPFS_ATTR_RW(_name, _show, _store) \
5189 static struct kobj_attribute tmpfs_attr_##_name = \
5190 __INIT_KOBJ_ATTR(_name, 0644, _show, _store)
5191
5192 #define TMPFS_ATTR_RO(_name, _show) \
5193 static struct kobj_attribute tmpfs_attr_##_name = \
5194 __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
5195
5196 #if IS_ENABLED(CONFIG_UNICODE)
5197 static ssize_t casefold_show(struct kobject *kobj, struct kobj_attribute *a,
5198 char *buf)
5199 {
5200 return sysfs_emit(buf, "supported\n");
5201 }
5202 TMPFS_ATTR_RO(casefold, casefold_show);
5203 #endif
5204
5205 static struct attribute *tmpfs_attributes[] = {
5206 #if IS_ENABLED(CONFIG_UNICODE)
5207 &tmpfs_attr_casefold.attr,
5208 #endif
5209 NULL
5210 };
5211
5212 static const struct attribute_group tmpfs_attribute_group = {
5213 .attrs = tmpfs_attributes,
5214 .name = "features"
5215 };
5216
5217 static struct kobject *tmpfs_kobj;
5218
5219 static int __init tmpfs_sysfs_init(void)
5220 {
5221 int ret;
5222
5223 tmpfs_kobj = kobject_create_and_add("tmpfs", fs_kobj);
5224 if (!tmpfs_kobj)
5225 return -ENOMEM;
5226
5227 ret = sysfs_create_group(tmpfs_kobj, &tmpfs_attribute_group);
5228 if (ret)
5229 kobject_put(tmpfs_kobj);
5230
5231 return ret;
5232 }
5233 #endif /* CONFIG_SYSFS && CONFIG_TMPFS */
5234
5235 void __init shmem_init(void)
5236 {
5237 int error;
5238
5239 shmem_init_inodecache();
5240
5241 #ifdef CONFIG_TMPFS_QUOTA
5242 register_quota_format(&shmem_quota_format);
5243 #endif
5244
5245 error = register_filesystem(&shmem_fs_type);
5246 if (error) {
5247 pr_err("Could not register tmpfs\n");
5248 goto out2;
5249 }
5250
5251 shm_mnt = kern_mount(&shmem_fs_type);
5252 if (IS_ERR(shm_mnt)) {
5253 error = PTR_ERR(shm_mnt);
5254 pr_err("Could not kern_mount tmpfs\n");
5255 goto out1;
5256 }
5257
5258 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5259 error = tmpfs_sysfs_init();
5260 if (error) {
5261 pr_err("Could not init tmpfs sysfs\n");
5262 goto out1;
5263 }
5264 #endif
5265
5266 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5267 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5268 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5269 else
5270 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5271
5272 /*
5273 * Default to setting PMD-sized THP to inherit the global setting and
5274 * disable all other multi-size THPs.
5275 */
5276 if (!shmem_orders_configured)
5277 huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
5278 #endif
5279 return;
5280
5281 out1:
5282 unregister_filesystem(&shmem_fs_type);
5283 out2:
5284 #ifdef CONFIG_TMPFS_QUOTA
5285 unregister_quota_format(&shmem_quota_format);
5286 #endif
5287 shmem_destroy_inodecache();
5288 shm_mnt = ERR_PTR(error);
5289 }
5290
5291 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5292 static ssize_t shmem_enabled_show(struct kobject *kobj,
5293 struct kobj_attribute *attr, char *buf)
5294 {
5295 static const int values[] = {
5296 SHMEM_HUGE_ALWAYS,
5297 SHMEM_HUGE_WITHIN_SIZE,
5298 SHMEM_HUGE_ADVISE,
5299 SHMEM_HUGE_NEVER,
5300 SHMEM_HUGE_DENY,
5301 SHMEM_HUGE_FORCE,
5302 };
5303 int len = 0;
5304 int i;
5305
5306 for (i = 0; i < ARRAY_SIZE(values); i++) {
5307 len += sysfs_emit_at(buf, len,
5308 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5309 i ? " " : "", shmem_format_huge(values[i]));
5310 }
5311 len += sysfs_emit_at(buf, len, "\n");
5312
5313 return len;
5314 }
5315
5316 static ssize_t shmem_enabled_store(struct kobject *kobj,
5317 struct kobj_attribute *attr, const char *buf, size_t count)
5318 {
5319 char tmp[16];
5320 int huge, err;
5321
5322 if (count + 1 > sizeof(tmp))
5323 return -EINVAL;
5324 memcpy(tmp, buf, count);
5325 tmp[count] = '\0';
5326 if (count && tmp[count - 1] == '\n')
5327 tmp[count - 1] = '\0';
5328
5329 huge = shmem_parse_huge(tmp);
5330 if (huge == -EINVAL)
5331 return huge;
5332
5333 shmem_huge = huge;
5334 if (shmem_huge > SHMEM_HUGE_DENY)
5335 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5336
5337 err = start_stop_khugepaged();
5338 return err ? err : count;
5339 }
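
/*
 * Illustrative userspace sketch (not part of the kernel source): writing
 * one of the tokens listed above to the sysfs knob these handlers
 * implement.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("within_size\n", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 */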
5340
5341 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
5342 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5343
5344 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5345 struct kobj_attribute *attr, char *buf)
5346 {
5347 int order = to_thpsize(kobj)->order;
5348 const char *output;
5349
5350 if (test_bit(order, &huge_shmem_orders_always))
5351 output = "[always] inherit within_size advise never";
5352 else if (test_bit(order, &huge_shmem_orders_inherit))
5353 output = "always [inherit] within_size advise never";
5354 else if (test_bit(order, &huge_shmem_orders_within_size))
5355 output = "always inherit [within_size] advise never";
5356 else if (test_bit(order, &huge_shmem_orders_madvise))
5357 output = "always inherit within_size [advise] never";
5358 else
5359 output = "always inherit within_size advise [never]";
5360
5361 return sysfs_emit(buf, "%s\n", output);
5362 }
5363
thpsize_shmem_enabled_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)5364 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5365 struct kobj_attribute *attr,
5366 const char *buf, size_t count)
5367 {
5368 int order = to_thpsize(kobj)->order;
5369 ssize_t ret = count;
5370
5371 if (sysfs_streq(buf, "always")) {
5372 spin_lock(&huge_shmem_orders_lock);
5373 clear_bit(order, &huge_shmem_orders_inherit);
5374 clear_bit(order, &huge_shmem_orders_madvise);
5375 clear_bit(order, &huge_shmem_orders_within_size);
5376 set_bit(order, &huge_shmem_orders_always);
5377 spin_unlock(&huge_shmem_orders_lock);
5378 } else if (sysfs_streq(buf, "inherit")) {
5379 /* Do not override huge allocation policy with non-PMD sized mTHP */
5380 if (shmem_huge == SHMEM_HUGE_FORCE &&
5381 order != HPAGE_PMD_ORDER)
5382 return -EINVAL;
5383
5384 spin_lock(&huge_shmem_orders_lock);
5385 clear_bit(order, &huge_shmem_orders_always);
5386 clear_bit(order, &huge_shmem_orders_madvise);
5387 clear_bit(order, &huge_shmem_orders_within_size);
5388 set_bit(order, &huge_shmem_orders_inherit);
5389 spin_unlock(&huge_shmem_orders_lock);
5390 } else if (sysfs_streq(buf, "within_size")) {
5391 spin_lock(&huge_shmem_orders_lock);
5392 clear_bit(order, &huge_shmem_orders_always);
5393 clear_bit(order, &huge_shmem_orders_inherit);
5394 clear_bit(order, &huge_shmem_orders_madvise);
5395 set_bit(order, &huge_shmem_orders_within_size);
5396 spin_unlock(&huge_shmem_orders_lock);
5397 } else if (sysfs_streq(buf, "advise")) {
5398 spin_lock(&huge_shmem_orders_lock);
5399 clear_bit(order, &huge_shmem_orders_always);
5400 clear_bit(order, &huge_shmem_orders_inherit);
5401 clear_bit(order, &huge_shmem_orders_within_size);
5402 set_bit(order, &huge_shmem_orders_madvise);
5403 spin_unlock(&huge_shmem_orders_lock);
5404 } else if (sysfs_streq(buf, "never")) {
5405 spin_lock(&huge_shmem_orders_lock);
5406 clear_bit(order, &huge_shmem_orders_always);
5407 clear_bit(order, &huge_shmem_orders_inherit);
5408 clear_bit(order, &huge_shmem_orders_within_size);
5409 clear_bit(order, &huge_shmem_orders_madvise);
5410 spin_unlock(&huge_shmem_orders_lock);
5411 } else {
5412 ret = -EINVAL;
5413 }
5414
5415 if (ret > 0) {
5416 int err = start_stop_khugepaged();
5417
5418 if (err)
5419 ret = err;
5420 }
5421 return ret;
5422 }
5423
5424 struct kobj_attribute thpsize_shmem_enabled_attr =
5425 __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
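
/*
 * One instance of the attribute above is created per supported mTHP size,
 * e.g. /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled.
 * Illustrative shell usage (a sketch, not part of this file):
 *
 *	# echo advise > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled
 *
 * Note that "inherit" is rejected for non-PMD sizes while the global
 * policy is "force", as enforced in thpsize_shmem_enabled_store() above.
 */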
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */

#if defined(CONFIG_TRANSPARENT_HUGEPAGE)

static int __init setup_transparent_hugepage_shmem(char *str)
{
	int huge;

	huge = shmem_parse_huge(str);
	if (huge == -EINVAL) {
		pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n");
		return huge;
	}

	shmem_huge = huge;
	return 1;
}
__setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem);
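
/*
 * Illustrative kernel command line (a sketch, not from this file):
 *
 *	transparent_hugepage_shmem=within_size
 *
 * This sets shmem_huge at boot, equivalent to writing "within_size" to the
 * shmem_enabled sysfs knob before any tmpfs mount exists.
 */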

static char str_dup[PAGE_SIZE] __initdata;
static int __init setup_thp_shmem(char *str)
{
	char *token, *range, *policy, *subtoken;
	unsigned long always, inherit, madvise, within_size;
	char *start_size, *end_size;
	int start, end, nr;
	char *p;

	if (!str || strlen(str) + 1 > PAGE_SIZE)
		goto err;
	strscpy(str_dup, str);

	always = huge_shmem_orders_always;
	inherit = huge_shmem_orders_inherit;
	madvise = huge_shmem_orders_madvise;
	within_size = huge_shmem_orders_within_size;
	p = str_dup;
	while ((token = strsep(&p, ";")) != NULL) {
		range = strsep(&token, ":");
		policy = token;

		if (!policy)
			goto err;

		while ((subtoken = strsep(&range, ",")) != NULL) {
			if (strchr(subtoken, '-')) {
				start_size = strsep(&subtoken, "-");
				end_size = subtoken;

				start = get_order_from_str(start_size,
							   THP_ORDERS_ALL_FILE_DEFAULT);
				end = get_order_from_str(end_size,
							 THP_ORDERS_ALL_FILE_DEFAULT);
			} else {
				start_size = end_size = subtoken;
				start = end = get_order_from_str(subtoken,
								 THP_ORDERS_ALL_FILE_DEFAULT);
			}

			if (start == -EINVAL) {
				pr_err("invalid size %s in thp_shmem boot parameter\n",
				       start_size);
				goto err;
			}

			if (end == -EINVAL) {
				pr_err("invalid size %s in thp_shmem boot parameter\n",
				       end_size);
				goto err;
			}

			if (start < 0 || end < 0 || start > end)
				goto err;

			nr = end - start + 1;
			if (!strcmp(policy, "always")) {
				bitmap_set(&always, start, nr);
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&within_size, start, nr);
			} else if (!strcmp(policy, "advise")) {
				bitmap_set(&madvise, start, nr);
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&always, start, nr);
				bitmap_clear(&within_size, start, nr);
			} else if (!strcmp(policy, "inherit")) {
				bitmap_set(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&always, start, nr);
				bitmap_clear(&within_size, start, nr);
			} else if (!strcmp(policy, "within_size")) {
				bitmap_set(&within_size, start, nr);
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&always, start, nr);
			} else if (!strcmp(policy, "never")) {
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&always, start, nr);
				bitmap_clear(&within_size, start, nr);
			} else {
				pr_err("invalid policy %s in thp_shmem boot parameter\n", policy);
				goto err;
			}
		}
	}

	huge_shmem_orders_always = always;
	huge_shmem_orders_madvise = madvise;
	huge_shmem_orders_inherit = inherit;
	huge_shmem_orders_within_size = within_size;
	shmem_orders_configured = true;
	return 1;

err:
	pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str);
	return 0;
}
__setup("thp_shmem=", setup_thp_shmem);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= ramfs_kill_sb,
	.fs_flags	= FS_USERNS_MOUNT,
};

void __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));
}

int shmem_unuse(unsigned int type)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_anon_vm_ops			generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
				struct super_block *sb, struct inode *dir,
				umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
	return inode ? inode : ERR_PTR(-ENOSPC);
}

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
			loff_t size, unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	if (is_idmapped_mnt(mnt))
		return ERR_PTR(-EINVAL);

	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
				S_IFREG | S_IRWXUGO, 0, flags);
	if (IS_ERR(inode)) {
		shmem_unacct_size(flags, size);
		return ERR_CAST(inode);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked, kernel-internal file in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 *
 * There will be NO LSM permission checks against the underlying inode, so
 * users of this interface must do LSM checks at a higher layer.  The users
 * are the big_key and shm implementations; LSM checks are provided at the
 * key or shm level rather than at the inode.
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}
EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
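
/*
 * Illustrative sketch of a caller (not from this file; "buf" and "len" are
 * placeholders): big_key-style code stashing a blob in a kernel-internal
 * tmpfs file via kernel_write():
 *
 *	struct file *file;
 *	loff_t pos = 0;
 *	ssize_t written;
 *
 *	file = shmem_kernel_file_setup("", len, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	written = kernel_write(file, buf, len, &pos);
 *	fput(file);
 *	return written < 0 ? written : 0;
 */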

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
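
/*
 * Illustrative sketch (not from this file; the name string and "obj" are
 * placeholders): drivers commonly use this to back an object with
 * swappable pages, e.g. GEM-style code:
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("some object", size, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	obj->filp = filp;	// dropped with fput() on teardown
 *
 * VM_NORESERVE skips up-front accounting of the full object size; pages
 * are accounted as they are actually instantiated.
 */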

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped, as prepared by do_mmap()
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_anon_vm_ops;

	return 0;
}
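
/*
 * This is reached, for instance, when userspace creates a shared anonymous
 * mapping (illustrative userspace sketch, not kernel code):
 *
 *	ptr = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * The mmap() path then calls shmem_zero_setup() to give the vma a tmpfs
 * file, so the pages can be shared across fork() and swapped out.
 */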

/**
 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the folio's address_space
 * @index: the folio index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
				   pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct folio *folio;
	int error;

	error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
				    gfp, NULL, NULL);
	if (error)
		return ERR_PTR(error);

	folio_unlock(folio);
	return folio;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return mapping_read_folio_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
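
/*
 * Illustrative sketch (not from this file) of the i915/ttm-style usage
 * mentioned above, reading object pages without risking an OOM kill:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct folio *folio;
 *
 *	folio = shmem_read_folio_gfp(mapping, index, gfp);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */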

struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
	struct page *page;

	if (IS_ERR(folio))
		return &folio->page;

	page = folio_file_page(folio, index);
	if (PageHWPoison(page)) {
		folio_put(folio);
		return ERR_PTR(-EIO);
	}

	return page;
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
