1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Resizable virtual memory filesystem for Linux.
4 *
5 * Copyright (C) 2000 Linus Torvalds.
6 * 2000 Transmeta Corp.
7 * 2000-2001 Christoph Rohland
8 * 2000-2001 SAP AG
9 * 2002 Red Hat Inc.
10 * Copyright (C) 2002-2011 Hugh Dickins.
11 * Copyright (C) 2011 Google Inc.
12 * Copyright (C) 2002-2005 VERITAS Software Corporation.
13 * Copyright (C) 2004 Andi Kleen, SuSE Labs
14 *
15 * Extended attribute support for tmpfs:
16 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
17 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
18 *
19 * tiny-shmem:
20 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
21 */
22
23 #include <linux/fs.h>
24 #include <linux/init.h>
25 #include <linux/vfs.h>
26 #include <linux/mount.h>
27 #include <linux/ramfs.h>
28 #include <linux/pagemap.h>
29 #include <linux/file.h>
30 #include <linux/fileattr.h>
31 #include <linux/filelock.h>
32 #include <linux/mm.h>
33 #include <linux/random.h>
34 #include <linux/sched/signal.h>
35 #include <linux/export.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/swap.h>
38 #include <linux/uio.h>
39 #include <linux/hugetlb.h>
40 #include <linux/fs_parser.h>
41 #include <linux/swapfile.h>
42 #include <linux/iversion.h>
43 #include <linux/unicode.h>
44 #include "swap.h"
45
46 static struct vfsmount *shm_mnt __ro_after_init;
47
48 #ifdef CONFIG_SHMEM
49 /*
50 * This virtual memory filesystem is heavily based on the ramfs. It
51 * extends ramfs by the ability to use swap and honor resource limits
52 * which makes it a completely usable filesystem.
53 */
54
55 #include <linux/xattr.h>
56 #include <linux/exportfs.h>
57 #include <linux/posix_acl.h>
58 #include <linux/posix_acl_xattr.h>
59 #include <linux/mman.h>
60 #include <linux/string.h>
61 #include <linux/slab.h>
62 #include <linux/backing-dev.h>
63 #include <linux/writeback.h>
64 #include <linux/pagevec.h>
65 #include <linux/percpu_counter.h>
66 #include <linux/falloc.h>
67 #include <linux/splice.h>
68 #include <linux/security.h>
69 #include <linux/leafops.h>
70 #include <linux/mempolicy.h>
71 #include <linux/namei.h>
72 #include <linux/ctype.h>
73 #include <linux/migrate.h>
74 #include <linux/highmem.h>
75 #include <linux/seq_file.h>
76 #include <linux/magic.h>
77 #include <linux/syscalls.h>
78 #include <linux/fcntl.h>
79 #include <uapi/linux/memfd.h>
80 #include <linux/rmap.h>
81 #include <linux/uuid.h>
82 #include <linux/quotaops.h>
83 #include <linux/rcupdate_wait.h>
84
85 #include <linux/uaccess.h>
86
87 #include "internal.h"
88
89 #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
90
91 /* Pretend that each entry is of this size in directory's i_size */
92 #define BOGO_DIRENT_SIZE 20
93
94 /* Pretend that one inode + its dentry occupy this much memory */
95 #define BOGO_INODE_SIZE 1024
96
97 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98 #define SHORT_SYMLINK_LEN 128
99
100 /*
101 * shmem_fallocate communicates with shmem_fault or shmem_writeout via
102 * inode->i_private (with i_rwsem making sure that it has only one user at
103 * a time): we would prefer not to enlarge the shmem inode just for that.
104 */
105 struct shmem_falloc {
106 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
107 pgoff_t start; /* start of range currently being fallocated */
108 pgoff_t next; /* the next page offset to be fallocated */
109 pgoff_t nr_falloced; /* how many new pages have been fallocated */
110 pgoff_t nr_unswapped; /* how often writeout refused to swap out */
111 };
112
113 struct shmem_options {
114 unsigned long long blocks;
115 unsigned long long inodes;
116 struct mempolicy *mpol;
117 kuid_t uid;
118 kgid_t gid;
119 umode_t mode;
120 bool full_inums;
121 int huge;
122 int seen;
123 bool noswap;
124 unsigned short quota_types;
125 struct shmem_quota_limits qlimits;
126 #if IS_ENABLED(CONFIG_UNICODE)
127 struct unicode_map *encoding;
128 bool strict_encoding;
129 #endif
130 #define SHMEM_SEEN_BLOCKS 1
131 #define SHMEM_SEEN_INODES 2
132 #define SHMEM_SEEN_HUGE 4
133 #define SHMEM_SEEN_INUMS 8
134 #define SHMEM_SEEN_QUOTA 16
135 };
136
137 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
138 static unsigned long huge_shmem_orders_always __read_mostly;
139 static unsigned long huge_shmem_orders_madvise __read_mostly;
140 static unsigned long huge_shmem_orders_inherit __read_mostly;
141 static unsigned long huge_shmem_orders_within_size __read_mostly;
142 static bool shmem_orders_configured __initdata;
143 #endif
144
145 #ifdef CONFIG_TMPFS
shmem_default_max_blocks(void)146 static unsigned long shmem_default_max_blocks(void)
147 {
148 return totalram_pages() / 2;
149 }
150
shmem_default_max_inodes(void)151 static unsigned long shmem_default_max_inodes(void)
152 {
153 unsigned long nr_pages = totalram_pages();
154
155 return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
156 ULONG_MAX / BOGO_INODE_SIZE);
157 }
158 #endif
159
160 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
161 struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
162 struct vm_area_struct *vma, vm_fault_t *fault_type);
163
SHMEM_SB(struct super_block * sb)164 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
165 {
166 return sb->s_fs_info;
167 }
168
169 /*
170 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
171 * for shared memory and for shared anonymous (/dev/zero) mappings
172 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
173 * consistent with the pre-accounting of private mappings ...
174 */
shmem_acct_size(unsigned long flags,loff_t size)175 static inline int shmem_acct_size(unsigned long flags, loff_t size)
176 {
177 return (flags & SHMEM_F_NORESERVE) ?
178 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
179 }
180
shmem_unacct_size(unsigned long flags,loff_t size)181 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
182 {
183 if (!(flags & SHMEM_F_NORESERVE))
184 vm_unacct_memory(VM_ACCT(size));
185 }
186
shmem_reacct_size(unsigned long flags,loff_t oldsize,loff_t newsize)187 static inline int shmem_reacct_size(unsigned long flags,
188 loff_t oldsize, loff_t newsize)
189 {
190 if (!(flags & SHMEM_F_NORESERVE)) {
191 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
192 return security_vm_enough_memory_mm(current->mm,
193 VM_ACCT(newsize) - VM_ACCT(oldsize));
194 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
195 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
196 }
197 return 0;
198 }
199
200 /*
201 * ... whereas tmpfs objects are accounted incrementally as
202 * pages are allocated, in order to allow large sparse files.
203 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
204 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
205 */
shmem_acct_blocks(unsigned long flags,long pages)206 static inline int shmem_acct_blocks(unsigned long flags, long pages)
207 {
208 if (!(flags & SHMEM_F_NORESERVE))
209 return 0;
210
211 return security_vm_enough_memory_mm(current->mm,
212 pages * VM_ACCT(PAGE_SIZE));
213 }
214
shmem_unacct_blocks(unsigned long flags,long pages)215 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
216 {
217 if (flags & SHMEM_F_NORESERVE)
218 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
219 }
220
shmem_inode_acct_blocks(struct inode * inode,long pages)221 int shmem_inode_acct_blocks(struct inode *inode, long pages)
222 {
223 struct shmem_inode_info *info = SHMEM_I(inode);
224 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
225 int err = -ENOSPC;
226
227 if (shmem_acct_blocks(info->flags, pages))
228 return err;
229
230 might_sleep(); /* when quotas */
231 if (sbinfo->max_blocks) {
232 if (!percpu_counter_limited_add(&sbinfo->used_blocks,
233 sbinfo->max_blocks, pages))
234 goto unacct;
235
236 err = dquot_alloc_block_nodirty(inode, pages);
237 if (err) {
238 percpu_counter_sub(&sbinfo->used_blocks, pages);
239 goto unacct;
240 }
241 } else {
242 err = dquot_alloc_block_nodirty(inode, pages);
243 if (err)
244 goto unacct;
245 }
246
247 return 0;
248
249 unacct:
250 shmem_unacct_blocks(info->flags, pages);
251 return err;
252 }
253
shmem_inode_unacct_blocks(struct inode * inode,long pages)254 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
255 {
256 struct shmem_inode_info *info = SHMEM_I(inode);
257 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
258
259 might_sleep(); /* when quotas */
260 dquot_free_block_nodirty(inode, pages);
261
262 if (sbinfo->max_blocks)
263 percpu_counter_sub(&sbinfo->used_blocks, pages);
264 shmem_unacct_blocks(info->flags, pages);
265 }
266
267 static const struct super_operations shmem_ops;
268 static const struct address_space_operations shmem_aops;
269 static const struct file_operations shmem_file_operations;
270 static const struct inode_operations shmem_inode_operations;
271 static const struct inode_operations shmem_dir_inode_operations;
272 static const struct inode_operations shmem_special_inode_operations;
273 static const struct vm_operations_struct shmem_vm_ops;
274 static const struct vm_operations_struct shmem_anon_vm_ops;
275 static struct file_system_type shmem_fs_type;
276
shmem_mapping(const struct address_space * mapping)277 bool shmem_mapping(const struct address_space *mapping)
278 {
279 return mapping->a_ops == &shmem_aops;
280 }
281 EXPORT_SYMBOL_GPL(shmem_mapping);
282
vma_is_anon_shmem(const struct vm_area_struct * vma)283 bool vma_is_anon_shmem(const struct vm_area_struct *vma)
284 {
285 return vma->vm_ops == &shmem_anon_vm_ops;
286 }
287
vma_is_shmem(const struct vm_area_struct * vma)288 bool vma_is_shmem(const struct vm_area_struct *vma)
289 {
290 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
291 }
292
293 static LIST_HEAD(shmem_swaplist);
294 static DEFINE_SPINLOCK(shmem_swaplist_lock);
295
296 #ifdef CONFIG_TMPFS_QUOTA
297
shmem_enable_quotas(struct super_block * sb,unsigned short quota_types)298 static int shmem_enable_quotas(struct super_block *sb,
299 unsigned short quota_types)
300 {
301 int type, err = 0;
302
303 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
304 for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
305 if (!(quota_types & (1 << type)))
306 continue;
307 err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
308 DQUOT_USAGE_ENABLED |
309 DQUOT_LIMITS_ENABLED);
310 if (err)
311 goto out_err;
312 }
313 return 0;
314
315 out_err:
316 pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
317 type, err);
318 for (type--; type >= 0; type--)
319 dquot_quota_off(sb, type);
320 return err;
321 }
322
shmem_disable_quotas(struct super_block * sb)323 static void shmem_disable_quotas(struct super_block *sb)
324 {
325 int type;
326
327 for (type = 0; type < SHMEM_MAXQUOTAS; type++)
328 dquot_quota_off(sb, type);
329 }
330
shmem_get_dquots(struct inode * inode)331 static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
332 {
333 return SHMEM_I(inode)->i_dquot;
334 }
335 #endif /* CONFIG_TMPFS_QUOTA */
336
337 /*
338 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
339 * produces a novel ino for the newly allocated inode.
340 *
341 * It may also be called when making a hard link to permit the space needed by
342 * each dentry. However, in that case, no new inode number is needed since that
343 * internally draws from another pool of inode numbers (currently global
344 * get_next_ino()). This case is indicated by passing NULL as inop.
345 */
346 #define SHMEM_INO_BATCH 1024
shmem_reserve_inode(struct super_block * sb,ino_t * inop)347 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
348 {
349 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
350 ino_t ino;
351
352 if (!(sb->s_flags & SB_KERNMOUNT)) {
353 raw_spin_lock(&sbinfo->stat_lock);
354 if (sbinfo->max_inodes) {
355 if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
356 raw_spin_unlock(&sbinfo->stat_lock);
357 return -ENOSPC;
358 }
359 sbinfo->free_ispace -= BOGO_INODE_SIZE;
360 }
361 if (inop) {
362 ino = sbinfo->next_ino++;
363 if (unlikely(is_zero_ino(ino)))
364 ino = sbinfo->next_ino++;
365 if (unlikely(!sbinfo->full_inums &&
366 ino > UINT_MAX)) {
367 /*
368 * Emulate get_next_ino uint wraparound for
369 * compatibility
370 */
371 if (IS_ENABLED(CONFIG_64BIT))
372 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
373 __func__, MINOR(sb->s_dev));
374 sbinfo->next_ino = 1;
375 ino = sbinfo->next_ino++;
376 }
377 *inop = ino;
378 }
379 raw_spin_unlock(&sbinfo->stat_lock);
380 } else if (inop) {
381 /*
382 * __shmem_file_setup, one of our callers, is lock-free: it
383 * doesn't hold stat_lock in shmem_reserve_inode since
384 * max_inodes is always 0, and is called from potentially
385 * unknown contexts. As such, use a per-cpu batched allocator
386 * which doesn't require the per-sb stat_lock unless we are at
387 * the batch boundary.
388 *
389 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
390 * shmem mounts are not exposed to userspace, so we don't need
391 * to worry about things like glibc compatibility.
392 */
393 ino_t *next_ino;
394
395 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
396 ino = *next_ino;
397 if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
398 raw_spin_lock(&sbinfo->stat_lock);
399 ino = sbinfo->next_ino;
400 sbinfo->next_ino += SHMEM_INO_BATCH;
401 raw_spin_unlock(&sbinfo->stat_lock);
402 if (unlikely(is_zero_ino(ino)))
403 ino++;
404 }
405 *inop = ino;
406 *next_ino = ++ino;
407 put_cpu();
408 }
409
410 return 0;
411 }
412
shmem_free_inode(struct super_block * sb,size_t freed_ispace)413 static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
414 {
415 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
416 if (sbinfo->max_inodes) {
417 raw_spin_lock(&sbinfo->stat_lock);
418 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
419 raw_spin_unlock(&sbinfo->stat_lock);
420 }
421 }
422
423 /**
424 * shmem_recalc_inode - recalculate the block usage of an inode
425 * @inode: inode to recalc
426 * @alloced: the change in number of pages allocated to inode
427 * @swapped: the change in number of pages swapped from inode
428 *
429 * We have to calculate the free blocks since the mm can drop
430 * undirtied hole pages behind our back.
431 *
432 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
433 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
434 *
435 * Return: true if swapped was incremented from 0, for shmem_writeout().
436 */
shmem_recalc_inode(struct inode * inode,long alloced,long swapped)437 bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
438 {
439 struct shmem_inode_info *info = SHMEM_I(inode);
440 bool first_swapped = false;
441 long freed;
442
443 spin_lock(&info->lock);
444 info->alloced += alloced;
445 info->swapped += swapped;
446 freed = info->alloced - info->swapped -
447 READ_ONCE(inode->i_mapping->nrpages);
448 /*
449 * Special case: whereas normally shmem_recalc_inode() is called
450 * after i_mapping->nrpages has already been adjusted (up or down),
451 * shmem_writeout() has to raise swapped before nrpages is lowered -
452 * to stop a racing shmem_recalc_inode() from thinking that a page has
453 * been freed. Compensate here, to avoid the need for a followup call.
454 */
455 if (swapped > 0) {
456 if (info->swapped == swapped)
457 first_swapped = true;
458 freed += swapped;
459 }
460 if (freed > 0)
461 info->alloced -= freed;
462 spin_unlock(&info->lock);
463
464 /* The quota case may block */
465 if (freed > 0)
466 shmem_inode_unacct_blocks(inode, freed);
467 return first_swapped;
468 }
469
shmem_charge(struct inode * inode,long pages)470 bool shmem_charge(struct inode *inode, long pages)
471 {
472 struct address_space *mapping = inode->i_mapping;
473
474 if (shmem_inode_acct_blocks(inode, pages))
475 return false;
476
477 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
478 xa_lock_irq(&mapping->i_pages);
479 mapping->nrpages += pages;
480 xa_unlock_irq(&mapping->i_pages);
481
482 shmem_recalc_inode(inode, pages, 0);
483 return true;
484 }
485
shmem_uncharge(struct inode * inode,long pages)486 void shmem_uncharge(struct inode *inode, long pages)
487 {
488 /* pages argument is currently unused: keep it to help debugging */
489 /* nrpages adjustment done by __filemap_remove_folio() or caller */
490
491 shmem_recalc_inode(inode, 0, 0);
492 }
493
494 /*
495 * Replace item expected in xarray by a new item, while holding xa_lock.
496 */
shmem_replace_entry(struct address_space * mapping,pgoff_t index,void * expected,void * replacement)497 static int shmem_replace_entry(struct address_space *mapping,
498 pgoff_t index, void *expected, void *replacement)
499 {
500 XA_STATE(xas, &mapping->i_pages, index);
501 void *item;
502
503 VM_BUG_ON(!expected);
504 VM_BUG_ON(!replacement);
505 item = xas_load(&xas);
506 if (item != expected)
507 return -ENOENT;
508 xas_store(&xas, replacement);
509 return 0;
510 }
511
512 /*
513 * Sometimes, before we decide whether to proceed or to fail, we must check
514 * that an entry was not already brought back or split by a racing thread.
515 *
516 * Checking folio is not enough: by the time a swapcache folio is locked, it
517 * might be reused, and again be swapcache, using the same swap as before.
518 * Returns the swap entry's order if it still presents, else returns -1.
519 */
shmem_confirm_swap(struct address_space * mapping,pgoff_t index,swp_entry_t swap)520 static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index,
521 swp_entry_t swap)
522 {
523 XA_STATE(xas, &mapping->i_pages, index);
524 int ret = -1;
525 void *entry;
526
527 rcu_read_lock();
528 do {
529 entry = xas_load(&xas);
530 if (entry == swp_to_radix_entry(swap))
531 ret = xas_get_order(&xas);
532 } while (xas_retry(&xas, entry));
533 rcu_read_unlock();
534 return ret;
535 }
536
537 /*
538 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
539 *
540 * SHMEM_HUGE_NEVER:
541 * disables huge pages for the mount;
542 * SHMEM_HUGE_ALWAYS:
543 * enables huge pages for the mount;
544 * SHMEM_HUGE_WITHIN_SIZE:
545 * only allocate huge pages if the page will be fully within i_size,
546 * also respect madvise() hints;
547 * SHMEM_HUGE_ADVISE:
548 * only allocate huge pages if requested with madvise();
549 */
550
551 #define SHMEM_HUGE_NEVER 0
552 #define SHMEM_HUGE_ALWAYS 1
553 #define SHMEM_HUGE_WITHIN_SIZE 2
554 #define SHMEM_HUGE_ADVISE 3
555
556 /*
557 * Special values.
558 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
559 *
560 * SHMEM_HUGE_DENY:
561 * disables huge on shm_mnt and all mounts, for emergency use;
562 * SHMEM_HUGE_FORCE:
563 * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
564 *
565 */
566 #define SHMEM_HUGE_DENY (-1)
567 #define SHMEM_HUGE_FORCE (-2)
568
569 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
570 /* ifdef here to avoid bloating shmem.o when not necessary */
571
572 #if defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_NEVER)
573 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_NEVER
574 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_ALWAYS)
575 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_ALWAYS
576 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_WITHIN_SIZE)
577 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_WITHIN_SIZE
578 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_SHMEM_HUGE_ADVISE)
579 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_ADVISE
580 #else
581 #define SHMEM_HUGE_DEFAULT SHMEM_HUGE_NEVER
582 #endif
583
584 static int shmem_huge __read_mostly = SHMEM_HUGE_DEFAULT;
585
586 #undef SHMEM_HUGE_DEFAULT
587
588 #if defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_NEVER)
589 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_NEVER
590 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_ALWAYS)
591 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_ALWAYS
592 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_WITHIN_SIZE)
593 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_WITHIN_SIZE
594 #elif defined(CONFIG_TRANSPARENT_HUGEPAGE_TMPFS_HUGE_ADVISE)
595 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_ADVISE
596 #else
597 #define TMPFS_HUGE_DEFAULT SHMEM_HUGE_NEVER
598 #endif
599
600 static int tmpfs_huge __read_mostly = TMPFS_HUGE_DEFAULT;
601
602 #undef TMPFS_HUGE_DEFAULT
603
shmem_get_orders_within_size(struct inode * inode,unsigned long within_size_orders,pgoff_t index,loff_t write_end)604 static unsigned int shmem_get_orders_within_size(struct inode *inode,
605 unsigned long within_size_orders, pgoff_t index,
606 loff_t write_end)
607 {
608 pgoff_t aligned_index;
609 unsigned long order;
610 loff_t i_size;
611
612 order = highest_order(within_size_orders);
613 while (within_size_orders) {
614 aligned_index = round_up(index + 1, 1 << order);
615 i_size = max(write_end, i_size_read(inode));
616 i_size = round_up(i_size, PAGE_SIZE);
617 if (i_size >> PAGE_SHIFT >= aligned_index)
618 return within_size_orders;
619
620 order = next_order(&within_size_orders, order);
621 }
622
623 return 0;
624 }
625
shmem_huge_global_enabled(struct inode * inode,pgoff_t index,loff_t write_end,bool shmem_huge_force,struct vm_area_struct * vma,vm_flags_t vm_flags)626 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
627 loff_t write_end, bool shmem_huge_force,
628 struct vm_area_struct *vma,
629 vm_flags_t vm_flags)
630 {
631 unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
632 0 : BIT(HPAGE_PMD_ORDER);
633 unsigned long within_size_orders;
634
635 if (!S_ISREG(inode->i_mode))
636 return 0;
637 if (shmem_huge == SHMEM_HUGE_DENY)
638 return 0;
639 if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
640 return maybe_pmd_order;
641
642 /*
643 * The huge order allocation for anon shmem is controlled through
644 * the mTHP interface, so we still use PMD-sized huge order to
645 * check whether global control is enabled.
646 *
647 * For tmpfs with 'huge=always' or 'huge=within_size' mount option,
648 * we will always try PMD-sized order first. If that failed, it will
649 * fall back to small large folios.
650 */
651 switch (SHMEM_SB(inode->i_sb)->huge) {
652 case SHMEM_HUGE_ALWAYS:
653 return THP_ORDERS_ALL_FILE_DEFAULT;
654 case SHMEM_HUGE_WITHIN_SIZE:
655 within_size_orders = shmem_get_orders_within_size(inode,
656 THP_ORDERS_ALL_FILE_DEFAULT, index, write_end);
657 if (within_size_orders > 0)
658 return within_size_orders;
659
660 fallthrough;
661 case SHMEM_HUGE_ADVISE:
662 if (vm_flags & VM_HUGEPAGE)
663 return THP_ORDERS_ALL_FILE_DEFAULT;
664 fallthrough;
665 default:
666 return 0;
667 }
668 }
669
shmem_parse_huge(const char * str)670 static int shmem_parse_huge(const char *str)
671 {
672 int huge;
673
674 if (!str)
675 return -EINVAL;
676
677 if (!strcmp(str, "never"))
678 huge = SHMEM_HUGE_NEVER;
679 else if (!strcmp(str, "always"))
680 huge = SHMEM_HUGE_ALWAYS;
681 else if (!strcmp(str, "within_size"))
682 huge = SHMEM_HUGE_WITHIN_SIZE;
683 else if (!strcmp(str, "advise"))
684 huge = SHMEM_HUGE_ADVISE;
685 else if (!strcmp(str, "deny"))
686 huge = SHMEM_HUGE_DENY;
687 else if (!strcmp(str, "force"))
688 huge = SHMEM_HUGE_FORCE;
689 else
690 return -EINVAL;
691
692 if (!has_transparent_hugepage() &&
693 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
694 return -EINVAL;
695
696 /* Do not override huge allocation policy with non-PMD sized mTHP */
697 if (huge == SHMEM_HUGE_FORCE &&
698 huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
699 return -EINVAL;
700
701 return huge;
702 }
703
704 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
shmem_format_huge(int huge)705 static const char *shmem_format_huge(int huge)
706 {
707 switch (huge) {
708 case SHMEM_HUGE_NEVER:
709 return "never";
710 case SHMEM_HUGE_ALWAYS:
711 return "always";
712 case SHMEM_HUGE_WITHIN_SIZE:
713 return "within_size";
714 case SHMEM_HUGE_ADVISE:
715 return "advise";
716 case SHMEM_HUGE_DENY:
717 return "deny";
718 case SHMEM_HUGE_FORCE:
719 return "force";
720 default:
721 VM_BUG_ON(1);
722 return "bad_val";
723 }
724 }
725 #endif
726
shmem_unused_huge_shrink(struct shmem_sb_info * sbinfo,struct shrink_control * sc,unsigned long nr_to_free)727 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
728 struct shrink_control *sc, unsigned long nr_to_free)
729 {
730 LIST_HEAD(list), *pos, *next;
731 struct inode *inode;
732 struct shmem_inode_info *info;
733 struct folio *folio;
734 unsigned long batch = sc ? sc->nr_to_scan : 128;
735 unsigned long split = 0, freed = 0;
736
737 if (list_empty(&sbinfo->shrinklist))
738 return SHRINK_STOP;
739
740 spin_lock(&sbinfo->shrinklist_lock);
741 list_for_each_safe(pos, next, &sbinfo->shrinklist) {
742 info = list_entry(pos, struct shmem_inode_info, shrinklist);
743
744 /* pin the inode */
745 inode = igrab(&info->vfs_inode);
746
747 /* inode is about to be evicted */
748 if (!inode) {
749 list_del_init(&info->shrinklist);
750 goto next;
751 }
752
753 list_move(&info->shrinklist, &list);
754 next:
755 sbinfo->shrinklist_len--;
756 if (!--batch)
757 break;
758 }
759 spin_unlock(&sbinfo->shrinklist_lock);
760
761 list_for_each_safe(pos, next, &list) {
762 pgoff_t next, end;
763 loff_t i_size;
764 int ret;
765
766 info = list_entry(pos, struct shmem_inode_info, shrinklist);
767 inode = &info->vfs_inode;
768
769 if (nr_to_free && freed >= nr_to_free)
770 goto move_back;
771
772 i_size = i_size_read(inode);
773 folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
774 if (!folio || xa_is_value(folio))
775 goto drop;
776
777 /* No large folio at the end of the file: nothing to split */
778 if (!folio_test_large(folio)) {
779 folio_put(folio);
780 goto drop;
781 }
782
783 /* Check if there is anything to gain from splitting */
784 next = folio_next_index(folio);
785 end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
786 if (end <= folio->index || end >= next) {
787 folio_put(folio);
788 goto drop;
789 }
790
791 /*
792 * Move the inode on the list back to shrinklist if we failed
793 * to lock the page at this time.
794 *
795 * Waiting for the lock may lead to deadlock in the
796 * reclaim path.
797 */
798 if (!folio_trylock(folio)) {
799 folio_put(folio);
800 goto move_back;
801 }
802
803 ret = split_folio(folio);
804 folio_unlock(folio);
805 folio_put(folio);
806
807 /* If split failed move the inode on the list back to shrinklist */
808 if (ret)
809 goto move_back;
810
811 freed += next - end;
812 split++;
813 drop:
814 list_del_init(&info->shrinklist);
815 goto put;
816 move_back:
817 /*
818 * Make sure the inode is either on the global list or deleted
819 * from any local list before iput() since it could be deleted
820 * in another thread once we put the inode (then the local list
821 * is corrupted).
822 */
823 spin_lock(&sbinfo->shrinklist_lock);
824 list_move(&info->shrinklist, &sbinfo->shrinklist);
825 sbinfo->shrinklist_len++;
826 spin_unlock(&sbinfo->shrinklist_lock);
827 put:
828 iput(inode);
829 }
830
831 return split;
832 }
833
shmem_unused_huge_scan(struct super_block * sb,struct shrink_control * sc)834 static long shmem_unused_huge_scan(struct super_block *sb,
835 struct shrink_control *sc)
836 {
837 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
838
839 if (!READ_ONCE(sbinfo->shrinklist_len))
840 return SHRINK_STOP;
841
842 return shmem_unused_huge_shrink(sbinfo, sc, 0);
843 }
844
shmem_unused_huge_count(struct super_block * sb,struct shrink_control * sc)845 static long shmem_unused_huge_count(struct super_block *sb,
846 struct shrink_control *sc)
847 {
848 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
849 return READ_ONCE(sbinfo->shrinklist_len);
850 }
851 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
852
853 #define shmem_huge SHMEM_HUGE_DENY
854
shmem_unused_huge_shrink(struct shmem_sb_info * sbinfo,struct shrink_control * sc,unsigned long nr_to_free)855 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
856 struct shrink_control *sc, unsigned long nr_to_free)
857 {
858 return 0;
859 }
860
shmem_huge_global_enabled(struct inode * inode,pgoff_t index,loff_t write_end,bool shmem_huge_force,struct vm_area_struct * vma,vm_flags_t vm_flags)861 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
862 loff_t write_end, bool shmem_huge_force,
863 struct vm_area_struct *vma,
864 vm_flags_t vm_flags)
865 {
866 return 0;
867 }
868 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
869
shmem_update_stats(struct folio * folio,int nr_pages)870 static void shmem_update_stats(struct folio *folio, int nr_pages)
871 {
872 if (folio_test_pmd_mappable(folio))
873 lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
874 lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
875 lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
876 }
877
878 /*
879 * Somewhat like filemap_add_folio, but error if expected item has gone.
880 */
shmem_add_to_page_cache(struct folio * folio,struct address_space * mapping,pgoff_t index,void * expected,gfp_t gfp)881 int shmem_add_to_page_cache(struct folio *folio,
882 struct address_space *mapping,
883 pgoff_t index, void *expected, gfp_t gfp)
884 {
885 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
886 unsigned long nr = folio_nr_pages(folio);
887 swp_entry_t iter, swap;
888 void *entry;
889
890 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
891 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
892 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
893
894 folio_ref_add(folio, nr);
895 folio->mapping = mapping;
896 folio->index = index;
897
898 gfp &= GFP_RECLAIM_MASK;
899 folio_throttle_swaprate(folio, gfp);
900 swap = radix_to_swp_entry(expected);
901
902 do {
903 iter = swap;
904 xas_lock_irq(&xas);
905 xas_for_each_conflict(&xas, entry) {
906 /*
907 * The range must either be empty, or filled with
908 * expected swap entries. Shmem swap entries are never
909 * partially freed without split of both entry and
910 * folio, so there shouldn't be any holes.
911 */
912 if (!expected || entry != swp_to_radix_entry(iter)) {
913 xas_set_err(&xas, -EEXIST);
914 goto unlock;
915 }
916 iter.val += 1 << xas_get_order(&xas);
917 }
918 if (expected && iter.val - nr != swap.val) {
919 xas_set_err(&xas, -EEXIST);
920 goto unlock;
921 }
922 xas_store(&xas, folio);
923 if (xas_error(&xas))
924 goto unlock;
925 shmem_update_stats(folio, nr);
926 mapping->nrpages += nr;
927 unlock:
928 xas_unlock_irq(&xas);
929 } while (xas_nomem(&xas, gfp));
930
931 if (xas_error(&xas)) {
932 folio->mapping = NULL;
933 folio_ref_sub(folio, nr);
934 return xas_error(&xas);
935 }
936
937 return 0;
938 }
939
940 /*
941 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
942 */
shmem_delete_from_page_cache(struct folio * folio,void * radswap)943 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
944 {
945 struct address_space *mapping = folio->mapping;
946 long nr = folio_nr_pages(folio);
947 int error;
948
949 xa_lock_irq(&mapping->i_pages);
950 error = shmem_replace_entry(mapping, folio->index, folio, radswap);
951 folio->mapping = NULL;
952 mapping->nrpages -= nr;
953 shmem_update_stats(folio, -nr);
954 xa_unlock_irq(&mapping->i_pages);
955 folio_put_refs(folio, nr);
956 BUG_ON(error);
957 }
958
959 /*
960 * Remove swap entry from page cache, free the swap and its page cache. Returns
961 * the number of pages being freed. 0 means entry not found in XArray (0 pages
962 * being freed).
963 */
shmem_free_swap(struct address_space * mapping,pgoff_t index,pgoff_t end,void * radswap)964 static long shmem_free_swap(struct address_space *mapping,
965 pgoff_t index, pgoff_t end, void *radswap)
966 {
967 XA_STATE(xas, &mapping->i_pages, index);
968 unsigned int nr_pages = 0;
969 pgoff_t base;
970 void *entry;
971
972 xas_lock_irq(&xas);
973 entry = xas_load(&xas);
974 if (entry == radswap) {
975 nr_pages = 1 << xas_get_order(&xas);
976 base = round_down(xas.xa_index, nr_pages);
977 if (base < index || base + nr_pages - 1 > end)
978 nr_pages = 0;
979 else
980 xas_store(&xas, NULL);
981 }
982 xas_unlock_irq(&xas);
983
984 if (nr_pages)
985 swap_put_entries_direct(radix_to_swp_entry(radswap), nr_pages);
986
987 return nr_pages;
988 }
989
990 /*
991 * Determine (in bytes) how many of the shmem object's pages mapped by the
992 * given offsets are swapped out.
993 *
994 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
995 * as long as the inode doesn't go away and racy results are not a problem.
996 */
shmem_partial_swap_usage(struct address_space * mapping,pgoff_t start,pgoff_t end)997 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
998 pgoff_t start, pgoff_t end)
999 {
1000 XA_STATE(xas, &mapping->i_pages, start);
1001 struct folio *folio;
1002 unsigned long swapped = 0;
1003 unsigned long max = end - 1;
1004
1005 rcu_read_lock();
1006 xas_for_each(&xas, folio, max) {
1007 if (xas_retry(&xas, folio))
1008 continue;
1009 if (xa_is_value(folio))
1010 swapped += 1 << xas_get_order(&xas);
1011 if (xas.xa_index == max)
1012 break;
1013 if (need_resched()) {
1014 xas_pause(&xas);
1015 cond_resched_rcu();
1016 }
1017 }
1018 rcu_read_unlock();
1019
1020 return swapped << PAGE_SHIFT;
1021 }
1022
1023 /*
1024 * Determine (in bytes) how many of the shmem object's pages mapped by the
1025 * given vma is swapped out.
1026 *
1027 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
1028 * as long as the inode doesn't go away and racy results are not a problem.
1029 */
shmem_swap_usage(struct vm_area_struct * vma)1030 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
1031 {
1032 struct inode *inode = file_inode(vma->vm_file);
1033 struct shmem_inode_info *info = SHMEM_I(inode);
1034 struct address_space *mapping = inode->i_mapping;
1035 unsigned long swapped;
1036
1037 /* Be careful as we don't hold info->lock */
1038 swapped = READ_ONCE(info->swapped);
1039
1040 /*
1041 * The easier cases are when the shmem object has nothing in swap, or
1042 * the vma maps it whole. Then we can simply use the stats that we
1043 * already track.
1044 */
1045 if (!swapped)
1046 return 0;
1047
1048 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
1049 return swapped << PAGE_SHIFT;
1050
1051 /* Here comes the more involved part */
1052 return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
1053 vma->vm_pgoff + vma_pages(vma));
1054 }
1055
1056 /*
1057 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
1058 */
shmem_unlock_mapping(struct address_space * mapping)1059 void shmem_unlock_mapping(struct address_space *mapping)
1060 {
1061 struct folio_batch fbatch;
1062 pgoff_t index = 0;
1063
1064 folio_batch_init(&fbatch);
1065 /*
1066 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
1067 */
1068 while (!mapping_unevictable(mapping) &&
1069 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
1070 check_move_unevictable_folios(&fbatch);
1071 folio_batch_release(&fbatch);
1072 cond_resched();
1073 }
1074 }
1075
shmem_get_partial_folio(struct inode * inode,pgoff_t index)1076 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
1077 {
1078 struct folio *folio;
1079
1080 /*
1081 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
1082 * beyond i_size, and reports fallocated folios as holes.
1083 */
1084 folio = filemap_get_entry(inode->i_mapping, index);
1085 if (!folio)
1086 return folio;
1087 if (!xa_is_value(folio)) {
1088 folio_lock(folio);
1089 if (folio->mapping == inode->i_mapping)
1090 return folio;
1091 /* The folio has been swapped out */
1092 folio_unlock(folio);
1093 folio_put(folio);
1094 }
1095 /*
1096 * But read a folio back from swap if any of it is within i_size
1097 * (although in some cases this is just a waste of time).
1098 */
1099 folio = NULL;
1100 shmem_get_folio(inode, index, 0, &folio, SGP_READ);
1101 return folio;
1102 }
1103
1104 /*
1105 * Remove range of pages and swap entries from page cache, and free them.
1106 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
1107 */
shmem_undo_range(struct inode * inode,loff_t lstart,uoff_t lend,bool unfalloc)1108 static void shmem_undo_range(struct inode *inode, loff_t lstart, uoff_t lend,
1109 bool unfalloc)
1110 {
1111 struct address_space *mapping = inode->i_mapping;
1112 struct shmem_inode_info *info = SHMEM_I(inode);
1113 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
1114 pgoff_t end = (lend + 1) >> PAGE_SHIFT;
1115 struct folio_batch fbatch;
1116 pgoff_t indices[PAGEVEC_SIZE];
1117 struct folio *folio;
1118 bool same_folio;
1119 long nr_swaps_freed = 0;
1120 pgoff_t index;
1121 int i;
1122
1123 if (lend == -1)
1124 end = -1; /* unsigned, so actually very big */
1125
1126 if (info->fallocend > start && info->fallocend <= end && !unfalloc)
1127 info->fallocend = start;
1128
1129 folio_batch_init(&fbatch);
1130 index = start;
1131 while (index < end && find_lock_entries(mapping, &index, end - 1,
1132 &fbatch, indices)) {
1133 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1134 folio = fbatch.folios[i];
1135
1136 if (xa_is_value(folio)) {
1137 if (unfalloc)
1138 continue;
1139 nr_swaps_freed += shmem_free_swap(mapping, indices[i],
1140 end - 1, folio);
1141 continue;
1142 }
1143
1144 if (!unfalloc || !folio_test_uptodate(folio))
1145 truncate_inode_folio(mapping, folio);
1146 folio_unlock(folio);
1147 }
1148 folio_batch_remove_exceptionals(&fbatch);
1149 folio_batch_release(&fbatch);
1150 cond_resched();
1151 }
1152
1153 /*
1154 * When undoing a failed fallocate, we want none of the partial folio
1155 * zeroing and splitting below, but shall want to truncate the whole
1156 * folio when !uptodate indicates that it was added by this fallocate,
1157 * even when [lstart, lend] covers only a part of the folio.
1158 */
1159 if (unfalloc)
1160 goto whole_folios;
1161
1162 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1163 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1164 if (folio) {
1165 same_folio = lend < folio_next_pos(folio);
1166 folio_mark_dirty(folio);
1167 if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1168 start = folio_next_index(folio);
1169 if (same_folio)
1170 end = folio->index;
1171 }
1172 folio_unlock(folio);
1173 folio_put(folio);
1174 folio = NULL;
1175 }
1176
1177 if (!same_folio)
1178 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1179 if (folio) {
1180 folio_mark_dirty(folio);
1181 if (!truncate_inode_partial_folio(folio, lstart, lend))
1182 end = folio->index;
1183 folio_unlock(folio);
1184 folio_put(folio);
1185 }
1186
1187 whole_folios:
1188
1189 index = start;
1190 while (index < end) {
1191 cond_resched();
1192
1193 if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1194 indices)) {
1195 /* If all gone or hole-punch or unfalloc, we're done */
1196 if (index == start || end != -1)
1197 break;
1198 /* But if truncating, restart to make sure all gone */
1199 index = start;
1200 continue;
1201 }
1202 for (i = 0; i < folio_batch_count(&fbatch); i++) {
1203 folio = fbatch.folios[i];
1204
1205 if (xa_is_value(folio)) {
1206 int order;
1207 long swaps_freed;
1208
1209 if (unfalloc)
1210 continue;
1211 swaps_freed = shmem_free_swap(mapping, indices[i],
1212 end - 1, folio);
1213 if (!swaps_freed) {
1214 pgoff_t base = indices[i];
1215
1216 order = shmem_confirm_swap(mapping, indices[i],
1217 radix_to_swp_entry(folio));
1218 /*
1219 * If found a large swap entry cross the end or start
1220 * border, skip it as the truncate_inode_partial_folio
1221 * above should have at least zerod its content once.
1222 */
1223 if (order > 0) {
1224 base = round_down(base, 1 << order);
1225 if (base < start || base + (1 << order) > end)
1226 continue;
1227 }
1228 /* Swap was replaced by page or extended, retry */
1229 index = base;
1230 break;
1231 }
1232 nr_swaps_freed += swaps_freed;
1233 continue;
1234 }
1235
1236 folio_lock(folio);
1237
1238 if (!unfalloc || !folio_test_uptodate(folio)) {
1239 if (folio_mapping(folio) != mapping) {
1240 /* Page was replaced by swap: retry */
1241 folio_unlock(folio);
1242 index = indices[i];
1243 break;
1244 }
1245 VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1246 folio);
1247
1248 if (!folio_test_large(folio)) {
1249 truncate_inode_folio(mapping, folio);
1250 } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1251 /*
1252 * If we split a page, reset the loop so
1253 * that we pick up the new sub pages.
1254 * Otherwise the THP was entirely
1255 * dropped or the target range was
1256 * zeroed, so just continue the loop as
1257 * is.
1258 */
1259 if (!folio_test_large(folio)) {
1260 folio_unlock(folio);
1261 index = start;
1262 break;
1263 }
1264 }
1265 }
1266 folio_unlock(folio);
1267 }
1268 folio_batch_remove_exceptionals(&fbatch);
1269 folio_batch_release(&fbatch);
1270 }
1271
1272 shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1273 }
1274
shmem_truncate_range(struct inode * inode,loff_t lstart,uoff_t lend)1275 void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
1276 {
1277 shmem_undo_range(inode, lstart, lend, false);
1278 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1279 inode_inc_iversion(inode);
1280 }
1281 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1282
shmem_getattr(struct mnt_idmap * idmap,const struct path * path,struct kstat * stat,u32 request_mask,unsigned int query_flags)1283 static int shmem_getattr(struct mnt_idmap *idmap,
1284 const struct path *path, struct kstat *stat,
1285 u32 request_mask, unsigned int query_flags)
1286 {
1287 struct inode *inode = path->dentry->d_inode;
1288 struct shmem_inode_info *info = SHMEM_I(inode);
1289
1290 if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1291 shmem_recalc_inode(inode, 0, 0);
1292
1293 if (info->fsflags & FS_APPEND_FL)
1294 stat->attributes |= STATX_ATTR_APPEND;
1295 if (info->fsflags & FS_IMMUTABLE_FL)
1296 stat->attributes |= STATX_ATTR_IMMUTABLE;
1297 if (info->fsflags & FS_NODUMP_FL)
1298 stat->attributes |= STATX_ATTR_NODUMP;
1299 stat->attributes_mask |= (STATX_ATTR_APPEND |
1300 STATX_ATTR_IMMUTABLE |
1301 STATX_ATTR_NODUMP);
1302 generic_fillattr(idmap, request_mask, inode, stat);
1303
1304 if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
1305 stat->blksize = HPAGE_PMD_SIZE;
1306
1307 if (request_mask & STATX_BTIME) {
1308 stat->result_mask |= STATX_BTIME;
1309 stat->btime.tv_sec = info->i_crtime.tv_sec;
1310 stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1311 }
1312
1313 return 0;
1314 }
1315
shmem_setattr(struct mnt_idmap * idmap,struct dentry * dentry,struct iattr * attr)1316 static int shmem_setattr(struct mnt_idmap *idmap,
1317 struct dentry *dentry, struct iattr *attr)
1318 {
1319 struct inode *inode = d_inode(dentry);
1320 struct shmem_inode_info *info = SHMEM_I(inode);
1321 int error;
1322 bool update_mtime = false;
1323 bool update_ctime = true;
1324
1325 error = setattr_prepare(idmap, dentry, attr);
1326 if (error)
1327 return error;
1328
1329 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1330 if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1331 return -EPERM;
1332 }
1333 }
1334
1335 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1336 loff_t oldsize = inode->i_size;
1337 loff_t newsize = attr->ia_size;
1338
1339 /* protected by i_rwsem */
1340 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1341 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1342 return -EPERM;
1343
1344 if (newsize != oldsize) {
1345 if (info->flags & SHMEM_F_MAPPING_FROZEN)
1346 return -EPERM;
1347 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1348 oldsize, newsize);
1349 if (error)
1350 return error;
1351 i_size_write(inode, newsize);
1352 update_mtime = true;
1353 } else {
1354 update_ctime = false;
1355 }
1356 if (newsize <= oldsize) {
1357 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1358 if (oldsize > holebegin)
1359 unmap_mapping_range(inode->i_mapping,
1360 holebegin, 0, 1);
1361 if (info->alloced)
1362 shmem_truncate_range(inode,
1363 newsize, (loff_t)-1);
1364 /* unmap again to remove racily COWed private pages */
1365 if (oldsize > holebegin)
1366 unmap_mapping_range(inode->i_mapping,
1367 holebegin, 0, 1);
1368 }
1369 }
1370
1371 if (is_quota_modification(idmap, inode, attr)) {
1372 error = dquot_initialize(inode);
1373 if (error)
1374 return error;
1375 }
1376
1377 /* Transfer quota accounting */
1378 if (i_uid_needs_update(idmap, attr, inode) ||
1379 i_gid_needs_update(idmap, attr, inode)) {
1380 error = dquot_transfer(idmap, inode, attr);
1381 if (error)
1382 return error;
1383 }
1384
1385 setattr_copy(idmap, inode, attr);
1386 if (attr->ia_valid & ATTR_MODE)
1387 error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1388 if (!error && update_ctime) {
1389 inode_set_ctime_current(inode);
1390 if (update_mtime)
1391 inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1392 inode_inc_iversion(inode);
1393 }
1394 return error;
1395 }
1396
shmem_evict_inode(struct inode * inode)1397 static void shmem_evict_inode(struct inode *inode)
1398 {
1399 struct shmem_inode_info *info = SHMEM_I(inode);
1400 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1401 size_t freed = 0;
1402
1403 if (shmem_mapping(inode->i_mapping)) {
1404 shmem_unacct_size(info->flags, inode->i_size);
1405 inode->i_size = 0;
1406 mapping_set_exiting(inode->i_mapping);
1407 shmem_truncate_range(inode, 0, (loff_t)-1);
1408 if (!list_empty(&info->shrinklist)) {
1409 spin_lock(&sbinfo->shrinklist_lock);
1410 if (!list_empty(&info->shrinklist)) {
1411 list_del_init(&info->shrinklist);
1412 sbinfo->shrinklist_len--;
1413 }
1414 spin_unlock(&sbinfo->shrinklist_lock);
1415 }
1416 while (!list_empty(&info->swaplist)) {
1417 /* Wait while shmem_unuse() is scanning this inode... */
1418 wait_var_event(&info->stop_eviction,
1419 !atomic_read(&info->stop_eviction));
1420 spin_lock(&shmem_swaplist_lock);
1421 /* ...but beware of the race if we peeked too early */
1422 if (!atomic_read(&info->stop_eviction))
1423 list_del_init(&info->swaplist);
1424 spin_unlock(&shmem_swaplist_lock);
1425 }
1426 }
1427
1428 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1429 shmem_free_inode(inode->i_sb, freed);
1430 WARN_ON(inode->i_blocks);
1431 clear_inode(inode);
1432 #ifdef CONFIG_TMPFS_QUOTA
1433 dquot_free_inode(inode);
1434 dquot_drop(inode);
1435 #endif
1436 }
1437
shmem_find_swap_entries(struct address_space * mapping,pgoff_t start,struct folio_batch * fbatch,pgoff_t * indices,unsigned int type)1438 static unsigned int shmem_find_swap_entries(struct address_space *mapping,
1439 pgoff_t start, struct folio_batch *fbatch,
1440 pgoff_t *indices, unsigned int type)
1441 {
1442 XA_STATE(xas, &mapping->i_pages, start);
1443 struct folio *folio;
1444 swp_entry_t entry;
1445
1446 rcu_read_lock();
1447 xas_for_each(&xas, folio, ULONG_MAX) {
1448 if (xas_retry(&xas, folio))
1449 continue;
1450
1451 if (!xa_is_value(folio))
1452 continue;
1453
1454 entry = radix_to_swp_entry(folio);
1455 /*
1456 * swapin error entries can be found in the mapping. But they're
1457 * deliberately ignored here as we've done everything we can do.
1458 */
1459 if (swp_type(entry) != type)
1460 continue;
1461
1462 indices[folio_batch_count(fbatch)] = xas.xa_index;
1463 if (!folio_batch_add(fbatch, folio))
1464 break;
1465
1466 if (need_resched()) {
1467 xas_pause(&xas);
1468 cond_resched_rcu();
1469 }
1470 }
1471 rcu_read_unlock();
1472
1473 return folio_batch_count(fbatch);
1474 }
1475
1476 /*
1477 * Move the swapped pages for an inode to page cache. Returns the count
1478 * of pages swapped in, or the error in case of failure.
1479 */
shmem_unuse_swap_entries(struct inode * inode,struct folio_batch * fbatch,pgoff_t * indices)1480 static int shmem_unuse_swap_entries(struct inode *inode,
1481 struct folio_batch *fbatch, pgoff_t *indices)
1482 {
1483 int i = 0;
1484 int ret = 0;
1485 int error = 0;
1486 struct address_space *mapping = inode->i_mapping;
1487
1488 for (i = 0; i < folio_batch_count(fbatch); i++) {
1489 struct folio *folio = fbatch->folios[i];
1490
1491 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1492 mapping_gfp_mask(mapping), NULL, NULL);
1493 if (error == 0) {
1494 folio_unlock(folio);
1495 folio_put(folio);
1496 ret++;
1497 }
1498 if (error == -ENOMEM)
1499 break;
1500 error = 0;
1501 }
1502 return error ? error : ret;
1503 }
1504
1505 /*
1506 * If swap found in inode, free it and move page from swapcache to filecache.
1507 */
shmem_unuse_inode(struct inode * inode,unsigned int type)1508 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1509 {
1510 struct address_space *mapping = inode->i_mapping;
1511 pgoff_t start = 0;
1512 struct folio_batch fbatch;
1513 pgoff_t indices[PAGEVEC_SIZE];
1514 int ret = 0;
1515
1516 do {
1517 folio_batch_init(&fbatch);
1518 if (!shmem_find_swap_entries(mapping, start, &fbatch,
1519 indices, type)) {
1520 ret = 0;
1521 break;
1522 }
1523
1524 ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1525 if (ret < 0)
1526 break;
1527
1528 start = indices[folio_batch_count(&fbatch) - 1];
1529 } while (true);
1530
1531 return ret;
1532 }
1533
1534 /*
1535 * Read all the shared memory data that resides in the swap
1536 * device 'type' back into memory, so the swap device can be
1537 * unused.
1538 */
shmem_unuse(unsigned int type)1539 int shmem_unuse(unsigned int type)
1540 {
1541 struct shmem_inode_info *info, *next;
1542 int error = 0;
1543
1544 if (list_empty(&shmem_swaplist))
1545 return 0;
1546
1547 spin_lock(&shmem_swaplist_lock);
1548 start_over:
1549 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1550 if (!info->swapped) {
1551 list_del_init(&info->swaplist);
1552 continue;
1553 }
1554 /*
1555 * Drop the swaplist mutex while searching the inode for swap;
1556 * but before doing so, make sure shmem_evict_inode() will not
1557 * remove placeholder inode from swaplist, nor let it be freed
1558 * (igrab() would protect from unlink, but not from unmount).
1559 */
1560 atomic_inc(&info->stop_eviction);
1561 spin_unlock(&shmem_swaplist_lock);
1562
1563 error = shmem_unuse_inode(&info->vfs_inode, type);
1564 cond_resched();
1565
1566 spin_lock(&shmem_swaplist_lock);
1567 if (atomic_dec_and_test(&info->stop_eviction))
1568 wake_up_var(&info->stop_eviction);
1569 if (error)
1570 break;
1571 if (list_empty(&info->swaplist))
1572 goto start_over;
1573 next = list_next_entry(info, swaplist);
1574 if (!info->swapped)
1575 list_del_init(&info->swaplist);
1576 }
1577 spin_unlock(&shmem_swaplist_lock);
1578
1579 return error;
1580 }
1581
1582 /**
1583 * shmem_writeout - Write the folio to swap
1584 * @folio: The folio to write
1585 * @plug: swap plug
1586 * @folio_list: list to put back folios on split
1587 *
1588 * Move the folio from the page cache to the swap cache.
1589 */
shmem_writeout(struct folio * folio,struct swap_iocb ** plug,struct list_head * folio_list)1590 int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
1591 struct list_head *folio_list)
1592 {
1593 struct address_space *mapping = folio->mapping;
1594 struct inode *inode = mapping->host;
1595 struct shmem_inode_info *info = SHMEM_I(inode);
1596 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1597 pgoff_t index;
1598 int nr_pages;
1599 bool split = false;
1600
1601 if ((info->flags & SHMEM_F_LOCKED) || sbinfo->noswap)
1602 goto redirty;
1603
1604 if (!total_swap_pages)
1605 goto redirty;
1606
1607 /*
1608 * If CONFIG_THP_SWAP is not enabled, the large folio should be
1609 * split when swapping.
1610 *
1611 * And shrinkage of pages beyond i_size does not split swap, so
1612 * swapout of a large folio crossing i_size needs to split too
1613 * (unless fallocate has been used to preallocate beyond EOF).
1614 */
1615 if (folio_test_large(folio)) {
1616 index = shmem_fallocend(inode,
1617 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
1618 if ((index > folio->index && index < folio_next_index(folio)) ||
1619 !IS_ENABLED(CONFIG_THP_SWAP))
1620 split = true;
1621 }
1622
1623 if (split) {
1624 int order;
1625
1626 try_split:
1627 order = folio_order(folio);
1628 /* Ensure the subpages are still dirty */
1629 folio_test_set_dirty(folio);
1630 if (split_folio_to_list(folio, folio_list))
1631 goto redirty;
1632
1633 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1634 if (order >= HPAGE_PMD_ORDER) {
1635 count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1);
1636 count_vm_event(THP_SWPOUT_FALLBACK);
1637 }
1638 #endif
1639 count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
1640
1641 folio_clear_dirty(folio);
1642 }
1643
1644 index = folio->index;
1645 nr_pages = folio_nr_pages(folio);
1646
1647 /*
1648 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1649 * value into swapfile.c, the only way we can correctly account for a
1650 * fallocated folio arriving here is now to initialize it and write it.
1651 *
1652 * That's okay for a folio already fallocated earlier, but if we have
1653 * not yet completed the fallocation, then (a) we want to keep track
1654 * of this folio in case we have to undo it, and (b) it may not be a
1655 * good idea to continue anyway, once we're pushing into swap. So
1656 * reactivate the folio, and let shmem_fallocate() quit when too many.
1657 */
1658 if (!folio_test_uptodate(folio)) {
1659 if (inode->i_private) {
1660 struct shmem_falloc *shmem_falloc;
1661 spin_lock(&inode->i_lock);
1662 shmem_falloc = inode->i_private;
1663 if (shmem_falloc &&
1664 !shmem_falloc->waitq &&
1665 index >= shmem_falloc->start &&
1666 index < shmem_falloc->next)
1667 shmem_falloc->nr_unswapped += nr_pages;
1668 else
1669 shmem_falloc = NULL;
1670 spin_unlock(&inode->i_lock);
1671 if (shmem_falloc)
1672 goto redirty;
1673 }
1674 folio_zero_range(folio, 0, folio_size(folio));
1675 flush_dcache_folio(folio);
1676 folio_mark_uptodate(folio);
1677 }
1678
1679 if (!folio_alloc_swap(folio)) {
1680 bool first_swapped = shmem_recalc_inode(inode, 0, nr_pages);
1681 int error;
1682
1683 /*
1684 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1685 * if it's not already there. Do it now before the folio is
1686 * removed from page cache, when its pagelock no longer
1687 * protects the inode from eviction. And do it now, after
1688 * we've incremented swapped, because shmem_unuse() will
1689 * prune a !swapped inode from the swaplist.
1690 */
1691 if (first_swapped) {
1692 spin_lock(&shmem_swaplist_lock);
1693 if (list_empty(&info->swaplist))
1694 list_add(&info->swaplist, &shmem_swaplist);
1695 spin_unlock(&shmem_swaplist_lock);
1696 }
1697
1698 folio_dup_swap(folio, NULL);
1699 shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
1700
1701 BUG_ON(folio_mapped(folio));
1702 error = swap_writeout(folio, plug);
1703 if (error != AOP_WRITEPAGE_ACTIVATE) {
1704 /* folio has been unlocked */
1705 return error;
1706 }
1707
1708 /*
1709 * The intention here is to avoid holding on to the swap when
1710 * zswap was unable to compress and unable to writeback; but
1711 * it will be appropriate if other reactivate cases are added.
1712 */
1713 error = shmem_add_to_page_cache(folio, mapping, index,
1714 swp_to_radix_entry(folio->swap),
1715 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
1716 /* Swap entry might be erased by racing shmem_free_swap() */
1717 if (!error) {
1718 shmem_recalc_inode(inode, 0, -nr_pages);
1719 folio_put_swap(folio, NULL);
1720 }
1721
1722 /*
1723 * The swap_cache_del_folio() below could be left for
1724 * shrink_folio_list()'s folio_free_swap() to dispose of;
1725 * but I'm a little nervous about letting this folio out of
1726 * shmem_writeout() in a hybrid half-tmpfs-half-swap state
1727 * e.g. folio_mapping(folio) might give an unexpected answer.
1728 */
1729 swap_cache_del_folio(folio);
1730 goto redirty;
1731 }
1732 if (nr_pages > 1)
1733 goto try_split;
1734 redirty:
1735 folio_mark_dirty(folio);
1736 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
1737 }
1738 EXPORT_SYMBOL_GPL(shmem_writeout);
1739
1740 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
shmem_show_mpol(struct seq_file * seq,struct mempolicy * mpol)1741 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1742 {
1743 char buffer[64];
1744
1745 if (!mpol || mpol->mode == MPOL_DEFAULT)
1746 return; /* show nothing */
1747
1748 mpol_to_str(buffer, sizeof(buffer), mpol);
1749
1750 seq_printf(seq, ",mpol=%s", buffer);
1751 }
1752
shmem_get_sbmpol(struct shmem_sb_info * sbinfo)1753 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1754 {
1755 struct mempolicy *mpol = NULL;
1756 if (sbinfo->mpol) {
1757 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1758 mpol = sbinfo->mpol;
1759 mpol_get(mpol);
1760 raw_spin_unlock(&sbinfo->stat_lock);
1761 }
1762 return mpol;
1763 }
1764 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
shmem_show_mpol(struct seq_file * seq,struct mempolicy * mpol)1765 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1766 {
1767 }
shmem_get_sbmpol(struct shmem_sb_info * sbinfo)1768 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1769 {
1770 return NULL;
1771 }
1772 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1773
1774 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1775 pgoff_t index, unsigned int order, pgoff_t *ilx);
1776
shmem_swapin_cluster(swp_entry_t swap,gfp_t gfp,struct shmem_inode_info * info,pgoff_t index)1777 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1778 struct shmem_inode_info *info, pgoff_t index)
1779 {
1780 struct mempolicy *mpol;
1781 pgoff_t ilx;
1782 struct folio *folio;
1783
1784 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1785 folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1786 mpol_cond_put(mpol);
1787
1788 return folio;
1789 }
1790
1791 /*
1792 * Make sure huge_gfp is always more limited than limit_gfp.
1793 * Some of the flags set permissions, while others set limitations.
1794 */
limit_gfp_mask(gfp_t huge_gfp,gfp_t limit_gfp)1795 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1796 {
1797 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1798 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1799 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1800 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1801
1802 /* Allow allocations only from the originally specified zones. */
1803 result |= zoneflags;
1804
1805 /*
1806 * Minimize the result gfp by taking the union with the deny flags,
1807 * and the intersection of the allow flags.
1808 */
1809 result |= (limit_gfp & denyflags);
1810 result |= (huge_gfp & limit_gfp) & allowflags;
1811
1812 return result;
1813 }
1814
1815 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
shmem_hpage_pmd_enabled(void)1816 bool shmem_hpage_pmd_enabled(void)
1817 {
1818 if (shmem_huge == SHMEM_HUGE_DENY)
1819 return false;
1820 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
1821 return true;
1822 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
1823 return true;
1824 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
1825 return true;
1826 if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
1827 shmem_huge != SHMEM_HUGE_NEVER)
1828 return true;
1829
1830 return false;
1831 }
1832
shmem_allowable_huge_orders(struct inode * inode,struct vm_area_struct * vma,pgoff_t index,loff_t write_end,bool shmem_huge_force)1833 unsigned long shmem_allowable_huge_orders(struct inode *inode,
1834 struct vm_area_struct *vma, pgoff_t index,
1835 loff_t write_end, bool shmem_huge_force)
1836 {
1837 unsigned long mask = READ_ONCE(huge_shmem_orders_always);
1838 unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
1839 vm_flags_t vm_flags = vma ? vma->vm_flags : 0;
1840 unsigned int global_orders;
1841
1842 if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force)))
1843 return 0;
1844
1845 global_orders = shmem_huge_global_enabled(inode, index, write_end,
1846 shmem_huge_force, vma, vm_flags);
1847 /* Tmpfs huge pages allocation */
1848 if (!vma || !vma_is_anon_shmem(vma))
1849 return global_orders;
1850
1851 /*
1852 * Following the 'deny' semantics of the top level, force the huge
1853 * option off from all mounts.
1854 */
1855 if (shmem_huge == SHMEM_HUGE_DENY)
1856 return 0;
1857
1858 /*
1859 * Only allow inherit orders if the top-level value is 'force', which
1860 * means non-PMD sized THP can not override 'huge' mount option now.
1861 */
1862 if (shmem_huge == SHMEM_HUGE_FORCE)
1863 return READ_ONCE(huge_shmem_orders_inherit);
1864
1865 /* Allow mTHP that will be fully within i_size. */
1866 mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
1867
1868 if (vm_flags & VM_HUGEPAGE)
1869 mask |= READ_ONCE(huge_shmem_orders_madvise);
1870
1871 if (global_orders > 0)
1872 mask |= READ_ONCE(huge_shmem_orders_inherit);
1873
1874 return THP_ORDERS_ALL_FILE_DEFAULT & mask;
1875 }
1876
shmem_suitable_orders(struct inode * inode,struct vm_fault * vmf,struct address_space * mapping,pgoff_t index,unsigned long orders)1877 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1878 struct address_space *mapping, pgoff_t index,
1879 unsigned long orders)
1880 {
1881 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1882 pgoff_t aligned_index;
1883 unsigned long pages;
1884 int order;
1885
1886 if (vma) {
1887 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1888 if (!orders)
1889 return 0;
1890 }
1891
1892 /* Find the highest order that can add into the page cache */
1893 order = highest_order(orders);
1894 while (orders) {
1895 pages = 1UL << order;
1896 aligned_index = round_down(index, pages);
1897 /*
1898 * Check for conflict before waiting on a huge allocation.
1899 * Conflict might be that a huge page has just been allocated
1900 * and added to page cache by a racing thread, or that there
1901 * is already at least one small page in the huge extent.
1902 * Be careful to retry when appropriate, but not forever!
1903 * Elsewhere -EEXIST would be the right code, but not here.
1904 */
1905 if (!xa_find(&mapping->i_pages, &aligned_index,
1906 aligned_index + pages - 1, XA_PRESENT))
1907 break;
1908 order = next_order(&orders, order);
1909 }
1910
1911 return orders;
1912 }
1913 #else
shmem_suitable_orders(struct inode * inode,struct vm_fault * vmf,struct address_space * mapping,pgoff_t index,unsigned long orders)1914 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1915 struct address_space *mapping, pgoff_t index,
1916 unsigned long orders)
1917 {
1918 return 0;
1919 }
1920 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1921
shmem_alloc_folio(gfp_t gfp,int order,struct shmem_inode_info * info,pgoff_t index)1922 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1923 struct shmem_inode_info *info, pgoff_t index)
1924 {
1925 struct mempolicy *mpol;
1926 pgoff_t ilx;
1927 struct folio *folio;
1928
1929 mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1930 folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1931 mpol_cond_put(mpol);
1932
1933 return folio;
1934 }
1935
shmem_alloc_and_add_folio(struct vm_fault * vmf,gfp_t gfp,struct inode * inode,pgoff_t index,struct mm_struct * fault_mm,unsigned long orders)1936 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1937 gfp_t gfp, struct inode *inode, pgoff_t index,
1938 struct mm_struct *fault_mm, unsigned long orders)
1939 {
1940 struct address_space *mapping = inode->i_mapping;
1941 struct shmem_inode_info *info = SHMEM_I(inode);
1942 unsigned long suitable_orders = 0;
1943 struct folio *folio = NULL;
1944 pgoff_t aligned_index;
1945 long pages;
1946 int error, order;
1947
1948 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1949 orders = 0;
1950
1951 if (orders > 0) {
1952 suitable_orders = shmem_suitable_orders(inode, vmf,
1953 mapping, index, orders);
1954
1955 order = highest_order(suitable_orders);
1956 while (suitable_orders) {
1957 pages = 1UL << order;
1958 aligned_index = round_down(index, pages);
1959 folio = shmem_alloc_folio(gfp, order, info, aligned_index);
1960 if (folio) {
1961 index = aligned_index;
1962 goto allocated;
1963 }
1964
1965 if (pages == HPAGE_PMD_NR)
1966 count_vm_event(THP_FILE_FALLBACK);
1967 count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1968 order = next_order(&suitable_orders, order);
1969 }
1970 } else {
1971 pages = 1;
1972 folio = shmem_alloc_folio(gfp, 0, info, index);
1973 }
1974 if (!folio)
1975 return ERR_PTR(-ENOMEM);
1976
1977 allocated:
1978 __folio_set_locked(folio);
1979 __folio_set_swapbacked(folio);
1980
1981 gfp &= GFP_RECLAIM_MASK;
1982 error = mem_cgroup_charge(folio, fault_mm, gfp);
1983 if (error) {
1984 if (xa_find(&mapping->i_pages, &index,
1985 index + pages - 1, XA_PRESENT)) {
1986 error = -EEXIST;
1987 } else if (pages > 1) {
1988 if (pages == HPAGE_PMD_NR) {
1989 count_vm_event(THP_FILE_FALLBACK);
1990 count_vm_event(THP_FILE_FALLBACK_CHARGE);
1991 }
1992 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1993 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1994 }
1995 goto unlock;
1996 }
1997
1998 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1999 if (error)
2000 goto unlock;
2001
2002 error = shmem_inode_acct_blocks(inode, pages);
2003 if (error) {
2004 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2005 long freed;
2006 /*
2007 * Try to reclaim some space by splitting a few
2008 * large folios beyond i_size on the filesystem.
2009 */
2010 shmem_unused_huge_shrink(sbinfo, NULL, pages);
2011 /*
2012 * And do a shmem_recalc_inode() to account for freed pages:
2013 * except our folio is there in cache, so not quite balanced.
2014 */
2015 spin_lock(&info->lock);
2016 freed = pages + info->alloced - info->swapped -
2017 READ_ONCE(mapping->nrpages);
2018 if (freed > 0)
2019 info->alloced -= freed;
2020 spin_unlock(&info->lock);
2021 if (freed > 0)
2022 shmem_inode_unacct_blocks(inode, freed);
2023 error = shmem_inode_acct_blocks(inode, pages);
2024 if (error) {
2025 filemap_remove_folio(folio);
2026 goto unlock;
2027 }
2028 }
2029
2030 shmem_recalc_inode(inode, pages, 0);
2031 folio_add_lru(folio);
2032 return folio;
2033
2034 unlock:
2035 folio_unlock(folio);
2036 folio_put(folio);
2037 return ERR_PTR(error);
2038 }
2039
shmem_swap_alloc_folio(struct inode * inode,struct vm_area_struct * vma,pgoff_t index,swp_entry_t entry,int order,gfp_t gfp)2040 static struct folio *shmem_swap_alloc_folio(struct inode *inode,
2041 struct vm_area_struct *vma, pgoff_t index,
2042 swp_entry_t entry, int order, gfp_t gfp)
2043 {
2044 struct shmem_inode_info *info = SHMEM_I(inode);
2045 struct folio *new, *swapcache;
2046 int nr_pages = 1 << order;
2047 gfp_t alloc_gfp;
2048
2049 /*
2050 * We have arrived here because our zones are constrained, so don't
2051 * limit chance of success with further cpuset and node constraints.
2052 */
2053 gfp &= ~GFP_CONSTRAINT_MASK;
2054 alloc_gfp = gfp;
2055 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
2056 if (WARN_ON_ONCE(order))
2057 return ERR_PTR(-EINVAL);
2058 } else if (order) {
2059 /*
2060 * If uffd is active for the vma, we need per-page fault
2061 * fidelity to maintain the uffd semantics, then fallback
2062 * to swapin order-0 folio, as well as for zswap case.
2063 * Any existing sub folio in the swap cache also blocks
2064 * mTHP swapin.
2065 */
2066 if ((vma && unlikely(userfaultfd_armed(vma))) ||
2067 !zswap_never_enabled() ||
2068 non_swapcache_batch(entry, nr_pages) != nr_pages)
2069 goto fallback;
2070
2071 alloc_gfp = limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
2072 }
2073 retry:
2074 new = shmem_alloc_folio(alloc_gfp, order, info, index);
2075 if (!new) {
2076 new = ERR_PTR(-ENOMEM);
2077 goto fallback;
2078 }
2079
2080 if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
2081 alloc_gfp, entry)) {
2082 folio_put(new);
2083 new = ERR_PTR(-ENOMEM);
2084 goto fallback;
2085 }
2086
2087 swapcache = swapin_folio(entry, new);
2088 if (swapcache != new) {
2089 folio_put(new);
2090 if (!swapcache) {
2091 /*
2092 * The new folio is charged already, swapin can
2093 * only fail due to another raced swapin.
2094 */
2095 new = ERR_PTR(-EEXIST);
2096 goto fallback;
2097 }
2098 }
2099 return swapcache;
2100 fallback:
2101 /* Order 0 swapin failed, nothing to fallback to, abort */
2102 if (!order)
2103 return new;
2104 entry.val += index - round_down(index, nr_pages);
2105 alloc_gfp = gfp;
2106 nr_pages = 1;
2107 order = 0;
2108 goto retry;
2109 }
2110
2111 /*
2112 * When a page is moved from swapcache to shmem filecache (either by the
2113 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
2114 * shmem_unuse_inode()), it may have been read in earlier from swap, in
2115 * ignorance of the mapping it belongs to. If that mapping has special
2116 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
2117 * we may need to copy to a suitable page before moving to filecache.
2118 *
2119 * In a future release, this may well be extended to respect cpuset and
2120 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
2121 * but for now it is a simple matter of zone.
2122 */
shmem_should_replace_folio(struct folio * folio,gfp_t gfp)2123 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
2124 {
2125 return folio_zonenum(folio) > gfp_zone(gfp);
2126 }
2127
shmem_replace_folio(struct folio ** foliop,gfp_t gfp,struct shmem_inode_info * info,pgoff_t index,struct vm_area_struct * vma)2128 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
2129 struct shmem_inode_info *info, pgoff_t index,
2130 struct vm_area_struct *vma)
2131 {
2132 struct swap_cluster_info *ci;
2133 struct folio *new, *old = *foliop;
2134 swp_entry_t entry = old->swap;
2135 int nr_pages = folio_nr_pages(old);
2136 int error = 0;
2137
2138 /*
2139 * We have arrived here because our zones are constrained, so don't
2140 * limit chance of success by further cpuset and node constraints.
2141 */
2142 gfp &= ~GFP_CONSTRAINT_MASK;
2143 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2144 if (nr_pages > 1) {
2145 gfp_t huge_gfp = vma_thp_gfp_mask(vma);
2146
2147 gfp = limit_gfp_mask(huge_gfp, gfp);
2148 }
2149 #endif
2150
2151 new = shmem_alloc_folio(gfp, folio_order(old), info, index);
2152 if (!new)
2153 return -ENOMEM;
2154
2155 folio_ref_add(new, nr_pages);
2156 folio_copy(new, old);
2157 flush_dcache_folio(new);
2158
2159 __folio_set_locked(new);
2160 __folio_set_swapbacked(new);
2161 folio_mark_uptodate(new);
2162 new->swap = entry;
2163 folio_set_swapcache(new);
2164
2165 ci = swap_cluster_get_and_lock_irq(old);
2166 __swap_cache_replace_folio(ci, old, new);
2167 mem_cgroup_replace_folio(old, new);
2168 shmem_update_stats(new, nr_pages);
2169 shmem_update_stats(old, -nr_pages);
2170 swap_cluster_unlock_irq(ci);
2171
2172 folio_add_lru(new);
2173 *foliop = new;
2174
2175 folio_clear_swapcache(old);
2176 old->private = NULL;
2177
2178 folio_unlock(old);
2179 /*
2180 * The old folio are removed from swap cache, drop the 'nr_pages'
2181 * reference, as well as one temporary reference getting from swap
2182 * cache.
2183 */
2184 folio_put_refs(old, nr_pages + 1);
2185 return error;
2186 }
2187
shmem_set_folio_swapin_error(struct inode * inode,pgoff_t index,struct folio * folio,swp_entry_t swap)2188 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
2189 struct folio *folio, swp_entry_t swap)
2190 {
2191 struct address_space *mapping = inode->i_mapping;
2192 swp_entry_t swapin_error;
2193 void *old;
2194 int nr_pages;
2195
2196 swapin_error = make_poisoned_swp_entry();
2197 old = xa_cmpxchg_irq(&mapping->i_pages, index,
2198 swp_to_radix_entry(swap),
2199 swp_to_radix_entry(swapin_error), 0);
2200 if (old != swp_to_radix_entry(swap))
2201 return;
2202
2203 nr_pages = folio_nr_pages(folio);
2204 folio_wait_writeback(folio);
2205 folio_put_swap(folio, NULL);
2206 swap_cache_del_folio(folio);
2207 /*
2208 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
2209 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
2210 * in shmem_evict_inode().
2211 */
2212 shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2213 }
2214
shmem_split_large_entry(struct inode * inode,pgoff_t index,swp_entry_t swap,gfp_t gfp)2215 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2216 swp_entry_t swap, gfp_t gfp)
2217 {
2218 struct address_space *mapping = inode->i_mapping;
2219 XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2220 int split_order = 0;
2221 int i;
2222
2223 /* Convert user data gfp flags to xarray node gfp flags */
2224 gfp &= GFP_RECLAIM_MASK;
2225
2226 for (;;) {
2227 void *old = NULL;
2228 int cur_order;
2229 pgoff_t swap_index;
2230
2231 xas_lock_irq(&xas);
2232 old = xas_load(&xas);
2233 if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2234 xas_set_err(&xas, -EEXIST);
2235 goto unlock;
2236 }
2237
2238 cur_order = xas_get_order(&xas);
2239 if (!cur_order)
2240 goto unlock;
2241
2242 /* Try to split large swap entry in pagecache */
2243 swap_index = round_down(index, 1 << cur_order);
2244 split_order = xas_try_split_min_order(cur_order);
2245
2246 while (cur_order > 0) {
2247 pgoff_t aligned_index =
2248 round_down(index, 1 << cur_order);
2249 pgoff_t swap_offset = aligned_index - swap_index;
2250
2251 xas_set_order(&xas, index, split_order);
2252 xas_try_split(&xas, old, cur_order);
2253 if (xas_error(&xas))
2254 goto unlock;
2255
2256 /*
2257 * Re-set the swap entry after splitting, and the swap
2258 * offset of the original large entry must be continuous.
2259 */
2260 for (i = 0; i < 1 << cur_order;
2261 i += (1 << split_order)) {
2262 swp_entry_t tmp;
2263
2264 tmp = swp_entry(swp_type(swap),
2265 swp_offset(swap) + swap_offset +
2266 i);
2267 __xa_store(&mapping->i_pages, aligned_index + i,
2268 swp_to_radix_entry(tmp), 0);
2269 }
2270 cur_order = split_order;
2271 split_order = xas_try_split_min_order(split_order);
2272 }
2273
2274 unlock:
2275 xas_unlock_irq(&xas);
2276
2277 if (!xas_nomem(&xas, gfp))
2278 break;
2279 }
2280
2281 if (xas_error(&xas))
2282 return xas_error(&xas);
2283
2284 return 0;
2285 }
2286
2287 /*
2288 * Swap in the folio pointed to by *foliop.
2289 * Caller has to make sure that *foliop contains a valid swapped folio.
2290 * Returns 0 and the folio in foliop if success. On failure, returns the
2291 * error code and NULL in *foliop.
2292 */
shmem_swapin_folio(struct inode * inode,pgoff_t index,struct folio ** foliop,enum sgp_type sgp,gfp_t gfp,struct vm_area_struct * vma,vm_fault_t * fault_type)2293 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2294 struct folio **foliop, enum sgp_type sgp,
2295 gfp_t gfp, struct vm_area_struct *vma,
2296 vm_fault_t *fault_type)
2297 {
2298 struct address_space *mapping = inode->i_mapping;
2299 struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2300 struct shmem_inode_info *info = SHMEM_I(inode);
2301 swp_entry_t swap;
2302 softleaf_t index_entry;
2303 struct swap_info_struct *si;
2304 struct folio *folio = NULL;
2305 int error, nr_pages, order;
2306 pgoff_t offset;
2307
2308 VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2309 index_entry = radix_to_swp_entry(*foliop);
2310 swap = index_entry;
2311 *foliop = NULL;
2312
2313 if (softleaf_is_poison_marker(index_entry))
2314 return -EIO;
2315
2316 si = get_swap_device(index_entry);
2317 order = shmem_confirm_swap(mapping, index, index_entry);
2318 if (unlikely(!si)) {
2319 if (order < 0)
2320 return -EEXIST;
2321 else
2322 return -EINVAL;
2323 }
2324 if (unlikely(order < 0)) {
2325 put_swap_device(si);
2326 return -EEXIST;
2327 }
2328
2329 /* index may point to the middle of a large entry, get the sub entry */
2330 if (order) {
2331 offset = index - round_down(index, 1 << order);
2332 swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2333 }
2334
2335 /* Look it up and read it in.. */
2336 folio = swap_cache_get_folio(swap);
2337 if (!folio) {
2338 if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
2339 /* Direct swapin skipping swap cache & readahead */
2340 folio = shmem_swap_alloc_folio(inode, vma, index,
2341 index_entry, order, gfp);
2342 if (IS_ERR(folio)) {
2343 error = PTR_ERR(folio);
2344 folio = NULL;
2345 goto failed;
2346 }
2347 } else {
2348 /* Cached swapin only supports order 0 folio */
2349 folio = shmem_swapin_cluster(swap, gfp, info, index);
2350 if (!folio) {
2351 error = -ENOMEM;
2352 goto failed;
2353 }
2354 }
2355 if (fault_type) {
2356 *fault_type |= VM_FAULT_MAJOR;
2357 count_vm_event(PGMAJFAULT);
2358 count_memcg_event_mm(fault_mm, PGMAJFAULT);
2359 }
2360 } else {
2361 swap_update_readahead(folio, NULL, 0);
2362 }
2363
2364 if (order > folio_order(folio)) {
2365 /*
2366 * Swapin may get smaller folios due to various reasons:
2367 * It may fallback to order 0 due to memory pressure or race,
2368 * swap readahead may swap in order 0 folios into swapcache
2369 * asynchronously, while the shmem mapping can still stores
2370 * large swap entries. In such cases, we should split the
2371 * large swap entry to prevent possible data corruption.
2372 */
2373 error = shmem_split_large_entry(inode, index, index_entry, gfp);
2374 if (error)
2375 goto failed_nolock;
2376 }
2377
2378 /*
2379 * If the folio is large, round down swap and index by folio size.
2380 * No matter what race occurs, the swap layer ensures we either get
2381 * a valid folio that has its swap entry aligned by size, or a
2382 * temporarily invalid one which we'll abort very soon and retry.
2383 *
2384 * shmem_add_to_page_cache ensures the whole range contains expected
2385 * entries and prevents any corruption, so any race split is fine
2386 * too, it will succeed as long as the entries are still there.
2387 */
2388 nr_pages = folio_nr_pages(folio);
2389 if (nr_pages > 1) {
2390 swap.val = round_down(swap.val, nr_pages);
2391 index = round_down(index, nr_pages);
2392 }
2393
2394 /*
2395 * We have to do this with the folio locked to prevent races.
2396 * The shmem_confirm_swap below only checks if the first swap
2397 * entry matches the folio, that's enough to ensure the folio
2398 * is not used outside of shmem, as shmem swap entries
2399 * and swap cache folios are never partially freed.
2400 */
2401 folio_lock(folio);
2402 if (!folio_matches_swap_entry(folio, swap) ||
2403 shmem_confirm_swap(mapping, index, swap) < 0) {
2404 error = -EEXIST;
2405 goto unlock;
2406 }
2407 if (!folio_test_uptodate(folio)) {
2408 error = -EIO;
2409 goto failed;
2410 }
2411 folio_wait_writeback(folio);
2412
2413 /*
2414 * Some architectures may have to restore extra metadata to the
2415 * folio after reading from swap.
2416 */
2417 arch_swap_restore(folio_swap(swap, folio), folio);
2418
2419 if (shmem_should_replace_folio(folio, gfp)) {
2420 error = shmem_replace_folio(&folio, gfp, info, index, vma);
2421 if (error)
2422 goto failed;
2423 }
2424
2425 error = shmem_add_to_page_cache(folio, mapping, index,
2426 swp_to_radix_entry(swap), gfp);
2427 if (error)
2428 goto failed;
2429
2430 shmem_recalc_inode(inode, 0, -nr_pages);
2431
2432 if (sgp == SGP_WRITE)
2433 folio_mark_accessed(folio);
2434
2435 folio_put_swap(folio, NULL);
2436 swap_cache_del_folio(folio);
2437 folio_mark_dirty(folio);
2438 put_swap_device(si);
2439
2440 *foliop = folio;
2441 return 0;
2442 failed:
2443 if (shmem_confirm_swap(mapping, index, swap) < 0)
2444 error = -EEXIST;
2445 if (error == -EIO)
2446 shmem_set_folio_swapin_error(inode, index, folio, swap);
2447 unlock:
2448 if (folio)
2449 folio_unlock(folio);
2450 failed_nolock:
2451 if (folio)
2452 folio_put(folio);
2453 put_swap_device(si);
2454
2455 return error;
2456 }
2457
2458 /*
2459 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2460 *
2461 * If we allocate a new one we do not mark it dirty. That's up to the
2462 * vm. If we swap it in we mark it dirty since we also free the swap
2463 * entry since a page cannot live in both the swap and page cache.
2464 *
2465 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2466 */
shmem_get_folio_gfp(struct inode * inode,pgoff_t index,loff_t write_end,struct folio ** foliop,enum sgp_type sgp,gfp_t gfp,struct vm_fault * vmf,vm_fault_t * fault_type)2467 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2468 loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2469 gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2470 {
2471 struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2472 struct mm_struct *fault_mm;
2473 struct folio *folio;
2474 int error;
2475 bool alloced;
2476 unsigned long orders = 0;
2477
2478 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2479 return -EINVAL;
2480
2481 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2482 return -EFBIG;
2483 repeat:
2484 if (sgp <= SGP_CACHE &&
2485 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2486 return -EINVAL;
2487
2488 alloced = false;
2489 fault_mm = vma ? vma->vm_mm : NULL;
2490
2491 folio = filemap_get_entry(inode->i_mapping, index);
2492 if (folio && vma && userfaultfd_minor(vma)) {
2493 if (!xa_is_value(folio))
2494 folio_put(folio);
2495 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2496 return 0;
2497 }
2498
2499 if (xa_is_value(folio)) {
2500 error = shmem_swapin_folio(inode, index, &folio,
2501 sgp, gfp, vma, fault_type);
2502 if (error == -EEXIST)
2503 goto repeat;
2504
2505 *foliop = folio;
2506 return error;
2507 }
2508
2509 if (folio) {
2510 folio_lock(folio);
2511
2512 /* Has the folio been truncated or swapped out? */
2513 if (unlikely(folio->mapping != inode->i_mapping)) {
2514 folio_unlock(folio);
2515 folio_put(folio);
2516 goto repeat;
2517 }
2518 if (sgp == SGP_WRITE)
2519 folio_mark_accessed(folio);
2520 if (folio_test_uptodate(folio))
2521 goto out;
2522 /* fallocated folio */
2523 if (sgp != SGP_READ)
2524 goto clear;
2525 folio_unlock(folio);
2526 folio_put(folio);
2527 }
2528
2529 /*
2530 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2531 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2532 */
2533 *foliop = NULL;
2534 if (sgp == SGP_READ)
2535 return 0;
2536 if (sgp == SGP_NOALLOC)
2537 return -ENOENT;
2538
2539 /*
2540 * Fast cache lookup and swap lookup did not find it: allocate.
2541 */
2542
2543 if (vma && userfaultfd_missing(vma)) {
2544 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2545 return 0;
2546 }
2547
2548 /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2549 orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2550 if (orders > 0) {
2551 gfp_t huge_gfp;
2552
2553 huge_gfp = vma_thp_gfp_mask(vma);
2554 huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2555 folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2556 inode, index, fault_mm, orders);
2557 if (!IS_ERR(folio)) {
2558 if (folio_test_pmd_mappable(folio))
2559 count_vm_event(THP_FILE_ALLOC);
2560 count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2561 goto alloced;
2562 }
2563 if (PTR_ERR(folio) == -EEXIST)
2564 goto repeat;
2565 }
2566
2567 folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2568 if (IS_ERR(folio)) {
2569 error = PTR_ERR(folio);
2570 if (error == -EEXIST)
2571 goto repeat;
2572 folio = NULL;
2573 goto unlock;
2574 }
2575
2576 alloced:
2577 alloced = true;
2578 if (folio_test_large(folio) &&
2579 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2580 folio_next_index(folio)) {
2581 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2582 struct shmem_inode_info *info = SHMEM_I(inode);
2583 /*
2584 * Part of the large folio is beyond i_size: subject
2585 * to shrink under memory pressure.
2586 */
2587 spin_lock(&sbinfo->shrinklist_lock);
2588 /*
2589 * _careful to defend against unlocked access to
2590 * ->shrink_list in shmem_unused_huge_shrink()
2591 */
2592 if (list_empty_careful(&info->shrinklist)) {
2593 list_add_tail(&info->shrinklist,
2594 &sbinfo->shrinklist);
2595 sbinfo->shrinklist_len++;
2596 }
2597 spin_unlock(&sbinfo->shrinklist_lock);
2598 }
2599
2600 if (sgp == SGP_WRITE)
2601 folio_set_referenced(folio);
2602 /*
2603 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2604 */
2605 if (sgp == SGP_FALLOC)
2606 sgp = SGP_WRITE;
2607 clear:
2608 /*
2609 * Let SGP_WRITE caller clear ends if write does not fill folio;
2610 * but SGP_FALLOC on a folio fallocated earlier must initialize
2611 * it now, lest undo on failure cancel our earlier guarantee.
2612 */
2613 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2614 long i, n = folio_nr_pages(folio);
2615
2616 for (i = 0; i < n; i++)
2617 clear_highpage(folio_page(folio, i));
2618 flush_dcache_folio(folio);
2619 folio_mark_uptodate(folio);
2620 }
2621
2622 /* Perhaps the file has been truncated since we checked */
2623 if (sgp <= SGP_CACHE &&
2624 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2625 error = -EINVAL;
2626 goto unlock;
2627 }
2628 out:
2629 *foliop = folio;
2630 return 0;
2631
2632 /*
2633 * Error recovery.
2634 */
2635 unlock:
2636 if (alloced)
2637 filemap_remove_folio(folio);
2638 shmem_recalc_inode(inode, 0, 0);
2639 if (folio) {
2640 folio_unlock(folio);
2641 folio_put(folio);
2642 }
2643 return error;
2644 }
2645
2646 /**
2647 * shmem_get_folio - find, and lock a shmem folio.
2648 * @inode: inode to search
2649 * @index: the page index.
2650 * @write_end: end of a write, could extend inode size
2651 * @foliop: pointer to the folio if found
2652 * @sgp: SGP_* flags to control behavior
2653 *
2654 * Looks up the page cache entry at @inode & @index. If a folio is
2655 * present, it is returned locked with an increased refcount.
2656 *
2657 * If the caller modifies data in the folio, it must call folio_mark_dirty()
2658 * before unlocking the folio to ensure that the folio is not reclaimed.
2659 * There is no need to reserve space before calling folio_mark_dirty().
2660 *
2661 * When no folio is found, the behavior depends on @sgp:
2662 * - for SGP_READ, *@foliop is %NULL and 0 is returned
2663 * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2664 * - for all other flags a new folio is allocated, inserted into the
2665 * page cache and returned locked in @foliop.
2666 *
2667 * Context: May sleep.
2668 * Return: 0 if successful, else a negative error code.
2669 */
shmem_get_folio(struct inode * inode,pgoff_t index,loff_t write_end,struct folio ** foliop,enum sgp_type sgp)2670 int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2671 struct folio **foliop, enum sgp_type sgp)
2672 {
2673 return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2674 mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2675 }
2676 EXPORT_SYMBOL_GPL(shmem_get_folio);
2677
2678 /*
2679 * This is like autoremove_wake_function, but it removes the wait queue
2680 * entry unconditionally - even if something else had already woken the
2681 * target.
2682 */
synchronous_wake_function(wait_queue_entry_t * wait,unsigned int mode,int sync,void * key)2683 static int synchronous_wake_function(wait_queue_entry_t *wait,
2684 unsigned int mode, int sync, void *key)
2685 {
2686 int ret = default_wake_function(wait, mode, sync, key);
2687 list_del_init(&wait->entry);
2688 return ret;
2689 }
2690
2691 /*
2692 * Trinity finds that probing a hole which tmpfs is punching can
2693 * prevent the hole-punch from ever completing: which in turn
2694 * locks writers out with its hold on i_rwsem. So refrain from
2695 * faulting pages into the hole while it's being punched. Although
2696 * shmem_undo_range() does remove the additions, it may be unable to
2697 * keep up, as each new page needs its own unmap_mapping_range() call,
2698 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2699 *
2700 * It does not matter if we sometimes reach this check just before the
2701 * hole-punch begins, so that one fault then races with the punch:
2702 * we just need to make racing faults a rare case.
2703 *
2704 * The implementation below would be much simpler if we just used a
2705 * standard mutex or completion: but we cannot take i_rwsem in fault,
2706 * and bloating every shmem inode for this unlikely case would be sad.
2707 */
shmem_falloc_wait(struct vm_fault * vmf,struct inode * inode)2708 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2709 {
2710 struct shmem_falloc *shmem_falloc;
2711 struct file *fpin = NULL;
2712 vm_fault_t ret = 0;
2713
2714 spin_lock(&inode->i_lock);
2715 shmem_falloc = inode->i_private;
2716 if (shmem_falloc &&
2717 shmem_falloc->waitq &&
2718 vmf->pgoff >= shmem_falloc->start &&
2719 vmf->pgoff < shmem_falloc->next) {
2720 wait_queue_head_t *shmem_falloc_waitq;
2721 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2722
2723 ret = VM_FAULT_NOPAGE;
2724 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2725 shmem_falloc_waitq = shmem_falloc->waitq;
2726 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2727 TASK_UNINTERRUPTIBLE);
2728 spin_unlock(&inode->i_lock);
2729 schedule();
2730
2731 /*
2732 * shmem_falloc_waitq points into the shmem_fallocate()
2733 * stack of the hole-punching task: shmem_falloc_waitq
2734 * is usually invalid by the time we reach here, but
2735 * finish_wait() does not dereference it in that case;
2736 * though i_lock needed lest racing with wake_up_all().
2737 */
2738 spin_lock(&inode->i_lock);
2739 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2740 }
2741 spin_unlock(&inode->i_lock);
2742 if (fpin) {
2743 fput(fpin);
2744 ret = VM_FAULT_RETRY;
2745 }
2746 return ret;
2747 }
2748
shmem_fault(struct vm_fault * vmf)2749 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2750 {
2751 struct inode *inode = file_inode(vmf->vma->vm_file);
2752 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2753 struct folio *folio = NULL;
2754 vm_fault_t ret = 0;
2755 int err;
2756
2757 /*
2758 * Trinity finds that probing a hole which tmpfs is punching can
2759 * prevent the hole-punch from ever completing: noted in i_private.
2760 */
2761 if (unlikely(inode->i_private)) {
2762 ret = shmem_falloc_wait(vmf, inode);
2763 if (ret)
2764 return ret;
2765 }
2766
2767 WARN_ON_ONCE(vmf->page != NULL);
2768 err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2769 gfp, vmf, &ret);
2770 if (err)
2771 return vmf_error(err);
2772 if (folio) {
2773 vmf->page = folio_file_page(folio, vmf->pgoff);
2774 ret |= VM_FAULT_LOCKED;
2775 }
2776 return ret;
2777 }
2778
shmem_get_unmapped_area(struct file * file,unsigned long uaddr,unsigned long len,unsigned long pgoff,unsigned long flags)2779 unsigned long shmem_get_unmapped_area(struct file *file,
2780 unsigned long uaddr, unsigned long len,
2781 unsigned long pgoff, unsigned long flags)
2782 {
2783 unsigned long addr;
2784 unsigned long offset;
2785 unsigned long inflated_len;
2786 unsigned long inflated_addr;
2787 unsigned long inflated_offset;
2788 unsigned long hpage_size;
2789
2790 if (len > TASK_SIZE)
2791 return -ENOMEM;
2792
2793 addr = mm_get_unmapped_area(file, uaddr, len, pgoff, flags);
2794
2795 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2796 return addr;
2797 if (IS_ERR_VALUE(addr))
2798 return addr;
2799 if (addr & ~PAGE_MASK)
2800 return addr;
2801 if (addr > TASK_SIZE - len)
2802 return addr;
2803
2804 if (shmem_huge == SHMEM_HUGE_DENY)
2805 return addr;
2806 if (flags & MAP_FIXED)
2807 return addr;
2808 /*
2809 * Our priority is to support MAP_SHARED mapped hugely;
2810 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2811 * But if caller specified an address hint and we allocated area there
2812 * successfully, respect that as before.
2813 */
2814 if (uaddr == addr)
2815 return addr;
2816
2817 hpage_size = HPAGE_PMD_SIZE;
2818 if (shmem_huge != SHMEM_HUGE_FORCE) {
2819 struct super_block *sb;
2820 unsigned long __maybe_unused hpage_orders;
2821 int order = 0;
2822
2823 if (file) {
2824 VM_BUG_ON(file->f_op != &shmem_file_operations);
2825 sb = file_inode(file)->i_sb;
2826 } else {
2827 /*
2828 * Called directly from mm/mmap.c, or drivers/char/mem.c
2829 * for "/dev/zero", to create a shared anonymous object.
2830 */
2831 if (IS_ERR(shm_mnt))
2832 return addr;
2833 sb = shm_mnt->mnt_sb;
2834
2835 /*
2836 * Find the highest mTHP order used for anonymous shmem to
2837 * provide a suitable alignment address.
2838 */
2839 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2840 hpage_orders = READ_ONCE(huge_shmem_orders_always);
2841 hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2842 hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2843 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2844 hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2845
2846 if (hpage_orders > 0) {
2847 order = highest_order(hpage_orders);
2848 hpage_size = PAGE_SIZE << order;
2849 }
2850 #endif
2851 }
2852 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2853 return addr;
2854 }
2855
2856 if (len < hpage_size)
2857 return addr;
2858
2859 offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2860 if (offset && offset + len < 2 * hpage_size)
2861 return addr;
2862 if ((addr & (hpage_size - 1)) == offset)
2863 return addr;
2864
2865 inflated_len = len + hpage_size - PAGE_SIZE;
2866 if (inflated_len > TASK_SIZE)
2867 return addr;
2868 if (inflated_len < len)
2869 return addr;
2870
2871 inflated_addr = mm_get_unmapped_area(NULL, uaddr, inflated_len, 0, flags);
2872 if (IS_ERR_VALUE(inflated_addr))
2873 return addr;
2874 if (inflated_addr & ~PAGE_MASK)
2875 return addr;
2876
2877 inflated_offset = inflated_addr & (hpage_size - 1);
2878 inflated_addr += offset - inflated_offset;
2879 if (inflated_offset > offset)
2880 inflated_addr += hpage_size;
2881
2882 if (inflated_addr > TASK_SIZE - len)
2883 return addr;
2884 return inflated_addr;
2885 }
2886
2887 #ifdef CONFIG_NUMA
shmem_set_policy(struct vm_area_struct * vma,struct mempolicy * mpol)2888 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2889 {
2890 struct inode *inode = file_inode(vma->vm_file);
2891 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2892 }
2893
shmem_get_policy(struct vm_area_struct * vma,unsigned long addr,pgoff_t * ilx)2894 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2895 unsigned long addr, pgoff_t *ilx)
2896 {
2897 struct inode *inode = file_inode(vma->vm_file);
2898 pgoff_t index;
2899
2900 /*
2901 * Bias interleave by inode number to distribute better across nodes;
2902 * but this interface is independent of which page order is used, so
2903 * supplies only that bias, letting caller apply the offset (adjusted
2904 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2905 */
2906 *ilx = inode->i_ino;
2907 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2908 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2909 }
2910
shmem_get_pgoff_policy(struct shmem_inode_info * info,pgoff_t index,unsigned int order,pgoff_t * ilx)2911 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2912 pgoff_t index, unsigned int order, pgoff_t *ilx)
2913 {
2914 struct mempolicy *mpol;
2915
2916 /* Bias interleave by inode number to distribute better across nodes */
2917 *ilx = info->vfs_inode.i_ino + (index >> order);
2918
2919 mpol = mpol_shared_policy_lookup(&info->policy, index);
2920 return mpol ? mpol : get_task_policy(current);
2921 }
2922 #else
shmem_get_pgoff_policy(struct shmem_inode_info * info,pgoff_t index,unsigned int order,pgoff_t * ilx)2923 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2924 pgoff_t index, unsigned int order, pgoff_t *ilx)
2925 {
2926 *ilx = 0;
2927 return NULL;
2928 }
2929 #endif /* CONFIG_NUMA */
2930
shmem_lock(struct file * file,int lock,struct ucounts * ucounts)2931 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2932 {
2933 struct inode *inode = file_inode(file);
2934 struct shmem_inode_info *info = SHMEM_I(inode);
2935 int retval = -ENOMEM;
2936
2937 /*
2938 * What serializes the accesses to info->flags?
2939 * ipc_lock_object() when called from shmctl_do_lock(),
2940 * no serialization needed when called from shm_destroy().
2941 */
2942 if (lock && !(info->flags & SHMEM_F_LOCKED)) {
2943 if (!user_shm_lock(inode->i_size, ucounts))
2944 goto out_nomem;
2945 info->flags |= SHMEM_F_LOCKED;
2946 mapping_set_unevictable(file->f_mapping);
2947 }
2948 if (!lock && (info->flags & SHMEM_F_LOCKED) && ucounts) {
2949 user_shm_unlock(inode->i_size, ucounts);
2950 info->flags &= ~SHMEM_F_LOCKED;
2951 mapping_clear_unevictable(file->f_mapping);
2952 }
2953 retval = 0;
2954
2955 out_nomem:
2956 return retval;
2957 }
2958
shmem_mmap_prepare(struct vm_area_desc * desc)2959 static int shmem_mmap_prepare(struct vm_area_desc *desc)
2960 {
2961 struct file *file = desc->file;
2962 struct inode *inode = file_inode(file);
2963
2964 file_accessed(file);
2965 /* This is anonymous shared memory if it is unlinked at the time of mmap */
2966 if (inode->i_nlink)
2967 desc->vm_ops = &shmem_vm_ops;
2968 else
2969 desc->vm_ops = &shmem_anon_vm_ops;
2970 return 0;
2971 }
2972
shmem_file_open(struct inode * inode,struct file * file)2973 static int shmem_file_open(struct inode *inode, struct file *file)
2974 {
2975 file->f_mode |= FMODE_CAN_ODIRECT;
2976 return generic_file_open(inode, file);
2977 }
2978
2979 #ifdef CONFIG_TMPFS_XATTR
2980 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2981
2982 #if IS_ENABLED(CONFIG_UNICODE)
2983 /*
2984 * shmem_inode_casefold_flags - Deal with casefold file attribute flag
2985 *
2986 * The casefold file attribute needs some special checks. I can just be added to
2987 * an empty dir, and can't be removed from a non-empty dir.
2988 */
shmem_inode_casefold_flags(struct inode * inode,unsigned int fsflags,struct dentry * dentry,unsigned int * i_flags)2989 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2990 struct dentry *dentry, unsigned int *i_flags)
2991 {
2992 unsigned int old = inode->i_flags;
2993 struct super_block *sb = inode->i_sb;
2994
2995 if (fsflags & FS_CASEFOLD_FL) {
2996 if (!(old & S_CASEFOLD)) {
2997 if (!sb->s_encoding)
2998 return -EOPNOTSUPP;
2999
3000 if (!S_ISDIR(inode->i_mode))
3001 return -ENOTDIR;
3002
3003 if (dentry && !simple_empty(dentry))
3004 return -ENOTEMPTY;
3005 }
3006
3007 *i_flags = *i_flags | S_CASEFOLD;
3008 } else if (old & S_CASEFOLD) {
3009 if (dentry && !simple_empty(dentry))
3010 return -ENOTEMPTY;
3011 }
3012
3013 return 0;
3014 }
3015 #else
shmem_inode_casefold_flags(struct inode * inode,unsigned int fsflags,struct dentry * dentry,unsigned int * i_flags)3016 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
3017 struct dentry *dentry, unsigned int *i_flags)
3018 {
3019 if (fsflags & FS_CASEFOLD_FL)
3020 return -EOPNOTSUPP;
3021
3022 return 0;
3023 }
3024 #endif
3025
3026 /*
3027 * chattr's fsflags are unrelated to extended attributes,
3028 * but tmpfs has chosen to enable them under the same config option.
3029 */
shmem_set_inode_flags(struct inode * inode,unsigned int fsflags,struct dentry * dentry)3030 static int shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3031 {
3032 unsigned int i_flags = 0;
3033 int ret;
3034
3035 ret = shmem_inode_casefold_flags(inode, fsflags, dentry, &i_flags);
3036 if (ret)
3037 return ret;
3038
3039 if (fsflags & FS_NOATIME_FL)
3040 i_flags |= S_NOATIME;
3041 if (fsflags & FS_APPEND_FL)
3042 i_flags |= S_APPEND;
3043 if (fsflags & FS_IMMUTABLE_FL)
3044 i_flags |= S_IMMUTABLE;
3045 /*
3046 * But FS_NODUMP_FL does not require any action in i_flags.
3047 */
3048 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE | S_CASEFOLD);
3049
3050 return 0;
3051 }
3052 #else
shmem_set_inode_flags(struct inode * inode,unsigned int fsflags,struct dentry * dentry)3053 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3054 {
3055 }
3056 #define shmem_initxattrs NULL
3057 #endif
3058
shmem_get_offset_ctx(struct inode * inode)3059 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
3060 {
3061 return &SHMEM_I(inode)->dir_offsets;
3062 }
3063
__shmem_get_inode(struct mnt_idmap * idmap,struct super_block * sb,struct inode * dir,umode_t mode,dev_t dev,vma_flags_t flags)3064 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
3065 struct super_block *sb,
3066 struct inode *dir, umode_t mode,
3067 dev_t dev, vma_flags_t flags)
3068 {
3069 struct inode *inode;
3070 struct shmem_inode_info *info;
3071 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3072 ino_t ino;
3073 int err;
3074
3075 err = shmem_reserve_inode(sb, &ino);
3076 if (err)
3077 return ERR_PTR(err);
3078
3079 inode = new_inode(sb);
3080 if (!inode) {
3081 shmem_free_inode(sb, 0);
3082 return ERR_PTR(-ENOSPC);
3083 }
3084
3085 inode->i_ino = ino;
3086 inode_init_owner(idmap, inode, dir, mode);
3087 inode->i_blocks = 0;
3088 simple_inode_init_ts(inode);
3089 inode->i_generation = get_random_u32();
3090 info = SHMEM_I(inode);
3091 memset(info, 0, (char *)inode - (char *)info);
3092 spin_lock_init(&info->lock);
3093 atomic_set(&info->stop_eviction, 0);
3094 info->seals = F_SEAL_SEAL;
3095 info->flags = vma_flags_test(&flags, VMA_NORESERVE_BIT)
3096 ? SHMEM_F_NORESERVE : 0;
3097 info->i_crtime = inode_get_mtime(inode);
3098 info->fsflags = (dir == NULL) ? 0 :
3099 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
3100 if (info->fsflags)
3101 shmem_set_inode_flags(inode, info->fsflags, NULL);
3102 INIT_LIST_HEAD(&info->shrinklist);
3103 INIT_LIST_HEAD(&info->swaplist);
3104 simple_xattrs_init(&info->xattrs);
3105 cache_no_acl(inode);
3106 if (sbinfo->noswap)
3107 mapping_set_unevictable(inode->i_mapping);
3108
3109 /* Don't consider 'deny' for emergencies and 'force' for testing */
3110 if (sbinfo->huge)
3111 mapping_set_large_folios(inode->i_mapping);
3112
3113 switch (mode & S_IFMT) {
3114 default:
3115 inode->i_op = &shmem_special_inode_operations;
3116 init_special_inode(inode, mode, dev);
3117 break;
3118 case S_IFREG:
3119 inode->i_mapping->a_ops = &shmem_aops;
3120 inode->i_op = &shmem_inode_operations;
3121 inode->i_fop = &shmem_file_operations;
3122 mpol_shared_policy_init(&info->policy,
3123 shmem_get_sbmpol(sbinfo));
3124 break;
3125 case S_IFDIR:
3126 inc_nlink(inode);
3127 /* Some things misbehave if size == 0 on a directory */
3128 inode->i_size = 2 * BOGO_DIRENT_SIZE;
3129 inode->i_op = &shmem_dir_inode_operations;
3130 inode->i_fop = &simple_offset_dir_operations;
3131 simple_offset_init(shmem_get_offset_ctx(inode));
3132 break;
3133 case S_IFLNK:
3134 /*
3135 * Must not load anything in the rbtree,
3136 * mpol_free_shared_policy will not be called.
3137 */
3138 mpol_shared_policy_init(&info->policy, NULL);
3139 break;
3140 }
3141
3142 lockdep_annotate_inode_mutex_key(inode);
3143 return inode;
3144 }
3145
3146 #ifdef CONFIG_TMPFS_QUOTA
shmem_get_inode(struct mnt_idmap * idmap,struct super_block * sb,struct inode * dir,umode_t mode,dev_t dev,vma_flags_t flags)3147 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3148 struct super_block *sb, struct inode *dir,
3149 umode_t mode, dev_t dev, vma_flags_t flags)
3150 {
3151 int err;
3152 struct inode *inode;
3153
3154 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3155 if (IS_ERR(inode))
3156 return inode;
3157
3158 err = dquot_initialize(inode);
3159 if (err)
3160 goto errout;
3161
3162 err = dquot_alloc_inode(inode);
3163 if (err) {
3164 dquot_drop(inode);
3165 goto errout;
3166 }
3167 return inode;
3168
3169 errout:
3170 inode->i_flags |= S_NOQUOTA;
3171 iput(inode);
3172 return ERR_PTR(err);
3173 }
3174 #else
shmem_get_inode(struct mnt_idmap * idmap,struct super_block * sb,struct inode * dir,umode_t mode,dev_t dev,vma_flags_t flags)3175 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3176 struct super_block *sb, struct inode *dir,
3177 umode_t mode, dev_t dev, vma_flags_t flags)
3178 {
3179 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3180 }
3181 #endif /* CONFIG_TMPFS_QUOTA */
3182
3183 #ifdef CONFIG_USERFAULTFD
shmem_mfill_atomic_pte(pmd_t * dst_pmd,struct vm_area_struct * dst_vma,unsigned long dst_addr,unsigned long src_addr,uffd_flags_t flags,struct folio ** foliop)3184 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
3185 struct vm_area_struct *dst_vma,
3186 unsigned long dst_addr,
3187 unsigned long src_addr,
3188 uffd_flags_t flags,
3189 struct folio **foliop)
3190 {
3191 struct inode *inode = file_inode(dst_vma->vm_file);
3192 struct shmem_inode_info *info = SHMEM_I(inode);
3193 struct address_space *mapping = inode->i_mapping;
3194 gfp_t gfp = mapping_gfp_mask(mapping);
3195 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
3196 void *page_kaddr;
3197 struct folio *folio;
3198 int ret;
3199 pgoff_t max_off;
3200
3201 if (shmem_inode_acct_blocks(inode, 1)) {
3202 /*
3203 * We may have got a page, returned -ENOENT triggering a retry,
3204 * and now we find ourselves with -ENOMEM. Release the page, to
3205 * avoid a BUG_ON in our caller.
3206 */
3207 if (unlikely(*foliop)) {
3208 folio_put(*foliop);
3209 *foliop = NULL;
3210 }
3211 return -ENOMEM;
3212 }
3213
3214 if (!*foliop) {
3215 ret = -ENOMEM;
3216 folio = shmem_alloc_folio(gfp, 0, info, pgoff);
3217 if (!folio)
3218 goto out_unacct_blocks;
3219
3220 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
3221 page_kaddr = kmap_local_folio(folio, 0);
3222 /*
3223 * The read mmap_lock is held here. Despite the
3224 * mmap_lock being read recursive a deadlock is still
3225 * possible if a writer has taken a lock. For example:
3226 *
3227 * process A thread 1 takes read lock on own mmap_lock
3228 * process A thread 2 calls mmap, blocks taking write lock
3229 * process B thread 1 takes page fault, read lock on own mmap lock
3230 * process B thread 2 calls mmap, blocks taking write lock
3231 * process A thread 1 blocks taking read lock on process B
3232 * process B thread 1 blocks taking read lock on process A
3233 *
3234 * Disable page faults to prevent potential deadlock
3235 * and retry the copy outside the mmap_lock.
3236 */
3237 pagefault_disable();
3238 ret = copy_from_user(page_kaddr,
3239 (const void __user *)src_addr,
3240 PAGE_SIZE);
3241 pagefault_enable();
3242 kunmap_local(page_kaddr);
3243
3244 /* fallback to copy_from_user outside mmap_lock */
3245 if (unlikely(ret)) {
3246 *foliop = folio;
3247 ret = -ENOENT;
3248 /* don't free the page */
3249 goto out_unacct_blocks;
3250 }
3251
3252 flush_dcache_folio(folio);
3253 } else { /* ZEROPAGE */
3254 clear_user_highpage(&folio->page, dst_addr);
3255 }
3256 } else {
3257 folio = *foliop;
3258 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
3259 *foliop = NULL;
3260 }
3261
3262 VM_BUG_ON(folio_test_locked(folio));
3263 VM_BUG_ON(folio_test_swapbacked(folio));
3264 __folio_set_locked(folio);
3265 __folio_set_swapbacked(folio);
3266 __folio_mark_uptodate(folio);
3267
3268 ret = -EFAULT;
3269 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3270 if (unlikely(pgoff >= max_off))
3271 goto out_release;
3272
3273 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
3274 if (ret)
3275 goto out_release;
3276 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
3277 if (ret)
3278 goto out_release;
3279
3280 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
3281 &folio->page, true, flags);
3282 if (ret)
3283 goto out_delete_from_cache;
3284
3285 shmem_recalc_inode(inode, 1, 0);
3286 folio_unlock(folio);
3287 return 0;
3288 out_delete_from_cache:
3289 filemap_remove_folio(folio);
3290 out_release:
3291 folio_unlock(folio);
3292 folio_put(folio);
3293 out_unacct_blocks:
3294 shmem_inode_unacct_blocks(inode, 1);
3295 return ret;
3296 }
3297 #endif /* CONFIG_USERFAULTFD */
3298
3299 #ifdef CONFIG_TMPFS
3300 static const struct inode_operations shmem_symlink_inode_operations;
3301 static const struct inode_operations shmem_short_symlink_operations;
3302
3303 static int
shmem_write_begin(const struct kiocb * iocb,struct address_space * mapping,loff_t pos,unsigned len,struct folio ** foliop,void ** fsdata)3304 shmem_write_begin(const struct kiocb *iocb, struct address_space *mapping,
3305 loff_t pos, unsigned len,
3306 struct folio **foliop, void **fsdata)
3307 {
3308 struct inode *inode = mapping->host;
3309 struct shmem_inode_info *info = SHMEM_I(inode);
3310 pgoff_t index = pos >> PAGE_SHIFT;
3311 struct folio *folio;
3312 int ret = 0;
3313
3314 /* i_rwsem is held by caller */
3315 if (unlikely(info->seals & (F_SEAL_GROW |
3316 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3317 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3318 return -EPERM;
3319 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3320 return -EPERM;
3321 }
3322
3323 if (unlikely((info->flags & SHMEM_F_MAPPING_FROZEN) &&
3324 pos + len > inode->i_size))
3325 return -EPERM;
3326
3327 ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3328 if (ret)
3329 return ret;
3330
3331 if (folio_contain_hwpoisoned_page(folio)) {
3332 folio_unlock(folio);
3333 folio_put(folio);
3334 return -EIO;
3335 }
3336
3337 *foliop = folio;
3338 return 0;
3339 }
3340
3341 static int
shmem_write_end(const struct kiocb * iocb,struct address_space * mapping,loff_t pos,unsigned len,unsigned copied,struct folio * folio,void * fsdata)3342 shmem_write_end(const struct kiocb *iocb, struct address_space *mapping,
3343 loff_t pos, unsigned len, unsigned copied,
3344 struct folio *folio, void *fsdata)
3345 {
3346 struct inode *inode = mapping->host;
3347
3348 if (pos + copied > inode->i_size)
3349 i_size_write(inode, pos + copied);
3350
3351 if (!folio_test_uptodate(folio)) {
3352 if (copied < folio_size(folio)) {
3353 size_t from = offset_in_folio(folio, pos);
3354 folio_zero_segments(folio, 0, from,
3355 from + copied, folio_size(folio));
3356 }
3357 folio_mark_uptodate(folio);
3358 }
3359 folio_mark_dirty(folio);
3360 folio_unlock(folio);
3361 folio_put(folio);
3362
3363 return copied;
3364 }
3365
shmem_file_read_iter(struct kiocb * iocb,struct iov_iter * to)3366 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3367 {
3368 struct file *file = iocb->ki_filp;
3369 struct inode *inode = file_inode(file);
3370 struct address_space *mapping = inode->i_mapping;
3371 pgoff_t index;
3372 unsigned long offset;
3373 int error = 0;
3374 ssize_t retval = 0;
3375
3376 for (;;) {
3377 struct folio *folio = NULL;
3378 struct page *page = NULL;
3379 unsigned long nr, ret;
3380 loff_t end_offset, i_size = i_size_read(inode);
3381 bool fallback_page_copy = false;
3382 size_t fsize;
3383
3384 if (unlikely(iocb->ki_pos >= i_size))
3385 break;
3386
3387 index = iocb->ki_pos >> PAGE_SHIFT;
3388 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3389 if (error) {
3390 if (error == -EINVAL)
3391 error = 0;
3392 break;
3393 }
3394 if (folio) {
3395 folio_unlock(folio);
3396
3397 page = folio_file_page(folio, index);
3398 if (PageHWPoison(page)) {
3399 folio_put(folio);
3400 error = -EIO;
3401 break;
3402 }
3403
3404 if (folio_test_large(folio) &&
3405 folio_test_has_hwpoisoned(folio))
3406 fallback_page_copy = true;
3407 }
3408
3409 /*
3410 * We must evaluate after, since reads (unlike writes)
3411 * are called without i_rwsem protection against truncate
3412 */
3413 i_size = i_size_read(inode);
3414 if (unlikely(iocb->ki_pos >= i_size)) {
3415 if (folio)
3416 folio_put(folio);
3417 break;
3418 }
3419 end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count);
3420 if (folio && likely(!fallback_page_copy))
3421 fsize = folio_size(folio);
3422 else
3423 fsize = PAGE_SIZE;
3424 offset = iocb->ki_pos & (fsize - 1);
3425 nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset);
3426
3427 if (folio) {
3428 /*
3429 * If users can be writing to this page using arbitrary
3430 * virtual addresses, take care about potential aliasing
3431 * before reading the page on the kernel side.
3432 */
3433 if (mapping_writably_mapped(mapping)) {
3434 if (likely(!fallback_page_copy))
3435 flush_dcache_folio(folio);
3436 else
3437 flush_dcache_page(page);
3438 }
3439
3440 /*
3441 * Mark the folio accessed if we read the beginning.
3442 */
3443 if (!offset)
3444 folio_mark_accessed(folio);
3445 /*
3446 * Ok, we have the page, and it's up-to-date, so
3447 * now we can copy it to user space...
3448 */
3449 if (likely(!fallback_page_copy))
3450 ret = copy_folio_to_iter(folio, offset, nr, to);
3451 else
3452 ret = copy_page_to_iter(page, offset, nr, to);
3453 folio_put(folio);
3454 } else if (user_backed_iter(to)) {
3455 /*
3456 * Copy to user tends to be so well optimized, but
3457 * clear_user() not so much, that it is noticeably
3458 * faster to copy the zero page instead of clearing.
3459 */
3460 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3461 } else {
3462 /*
3463 * But submitting the same page twice in a row to
3464 * splice() - or others? - can result in confusion:
3465 * so don't attempt that optimization on pipes etc.
3466 */
3467 ret = iov_iter_zero(nr, to);
3468 }
3469
3470 retval += ret;
3471 iocb->ki_pos += ret;
3472
3473 if (!iov_iter_count(to))
3474 break;
3475 if (ret < nr) {
3476 error = -EFAULT;
3477 break;
3478 }
3479 cond_resched();
3480 }
3481
3482 file_accessed(file);
3483 return retval ? retval : error;
3484 }
3485
shmem_file_write_iter(struct kiocb * iocb,struct iov_iter * from)3486 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3487 {
3488 struct file *file = iocb->ki_filp;
3489 struct inode *inode = file->f_mapping->host;
3490 ssize_t ret;
3491
3492 inode_lock(inode);
3493 ret = generic_write_checks(iocb, from);
3494 if (ret <= 0)
3495 goto unlock;
3496 ret = file_remove_privs(file);
3497 if (ret)
3498 goto unlock;
3499 ret = file_update_time(file);
3500 if (ret)
3501 goto unlock;
3502 ret = generic_perform_write(iocb, from);
3503 unlock:
3504 inode_unlock(inode);
3505 return ret;
3506 }
3507
zero_pipe_buf_get(struct pipe_inode_info * pipe,struct pipe_buffer * buf)3508 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3509 struct pipe_buffer *buf)
3510 {
3511 return true;
3512 }
3513
zero_pipe_buf_release(struct pipe_inode_info * pipe,struct pipe_buffer * buf)3514 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3515 struct pipe_buffer *buf)
3516 {
3517 }
3518
zero_pipe_buf_try_steal(struct pipe_inode_info * pipe,struct pipe_buffer * buf)3519 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3520 struct pipe_buffer *buf)
3521 {
3522 return false;
3523 }
3524
3525 static const struct pipe_buf_operations zero_pipe_buf_ops = {
3526 .release = zero_pipe_buf_release,
3527 .try_steal = zero_pipe_buf_try_steal,
3528 .get = zero_pipe_buf_get,
3529 };
3530
splice_zeropage_into_pipe(struct pipe_inode_info * pipe,loff_t fpos,size_t size)3531 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3532 loff_t fpos, size_t size)
3533 {
3534 size_t offset = fpos & ~PAGE_MASK;
3535
3536 size = min_t(size_t, size, PAGE_SIZE - offset);
3537
3538 if (!pipe_is_full(pipe)) {
3539 struct pipe_buffer *buf = pipe_head_buf(pipe);
3540
3541 *buf = (struct pipe_buffer) {
3542 .ops = &zero_pipe_buf_ops,
3543 .page = ZERO_PAGE(0),
3544 .offset = offset,
3545 .len = size,
3546 };
3547 pipe->head++;
3548 }
3549
3550 return size;
3551 }
3552
shmem_file_splice_read(struct file * in,loff_t * ppos,struct pipe_inode_info * pipe,size_t len,unsigned int flags)3553 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3554 struct pipe_inode_info *pipe,
3555 size_t len, unsigned int flags)
3556 {
3557 struct inode *inode = file_inode(in);
3558 struct address_space *mapping = inode->i_mapping;
3559 struct folio *folio = NULL;
3560 size_t total_spliced = 0, used, npages, n, part;
3561 loff_t isize;
3562 int error = 0;
3563
3564 /* Work out how much data we can actually add into the pipe */
3565 used = pipe_buf_usage(pipe);
3566 npages = max_t(ssize_t, pipe->max_usage - used, 0);
3567 len = min_t(size_t, len, npages * PAGE_SIZE);
3568
3569 do {
3570 bool fallback_page_splice = false;
3571 struct page *page = NULL;
3572 pgoff_t index;
3573 size_t size;
3574
3575 if (*ppos >= i_size_read(inode))
3576 break;
3577
3578 index = *ppos >> PAGE_SHIFT;
3579 error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3580 if (error) {
3581 if (error == -EINVAL)
3582 error = 0;
3583 break;
3584 }
3585 if (folio) {
3586 folio_unlock(folio);
3587
3588 page = folio_file_page(folio, index);
3589 if (PageHWPoison(page)) {
3590 error = -EIO;
3591 break;
3592 }
3593
3594 if (folio_test_large(folio) &&
3595 folio_test_has_hwpoisoned(folio))
3596 fallback_page_splice = true;
3597 }
3598
3599 /*
3600 * i_size must be checked after we know the pages are Uptodate.
3601 *
3602 * Checking i_size after the check allows us to calculate
3603 * the correct value for "nr", which means the zero-filled
3604 * part of the page is not copied back to userspace (unless
3605 * another truncate extends the file - this is desired though).
3606 */
3607 isize = i_size_read(inode);
3608 if (unlikely(*ppos >= isize))
3609 break;
3610 /*
3611 * Fallback to PAGE_SIZE splice if the large folio has hwpoisoned
3612 * pages.
3613 */
3614 size = len;
3615 if (unlikely(fallback_page_splice)) {
3616 size_t offset = *ppos & ~PAGE_MASK;
3617
3618 size = umin(size, PAGE_SIZE - offset);
3619 }
3620 part = min_t(loff_t, isize - *ppos, size);
3621
3622 if (folio) {
3623 /*
3624 * If users can be writing to this page using arbitrary
3625 * virtual addresses, take care about potential aliasing
3626 * before reading the page on the kernel side.
3627 */
3628 if (mapping_writably_mapped(mapping)) {
3629 if (likely(!fallback_page_splice))
3630 flush_dcache_folio(folio);
3631 else
3632 flush_dcache_page(page);
3633 }
3634 folio_mark_accessed(folio);
3635 /*
3636 * Ok, we have the page, and it's up-to-date, so we can
3637 * now splice it into the pipe.
3638 */
3639 n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3640 folio_put(folio);
3641 folio = NULL;
3642 } else {
3643 n = splice_zeropage_into_pipe(pipe, *ppos, part);
3644 }
3645
3646 if (!n)
3647 break;
3648 len -= n;
3649 total_spliced += n;
3650 *ppos += n;
3651 in->f_ra.prev_pos = *ppos;
3652 if (pipe_is_full(pipe))
3653 break;
3654
3655 cond_resched();
3656 } while (len);
3657
3658 if (folio)
3659 folio_put(folio);
3660
3661 file_accessed(in);
3662 return total_spliced ? total_spliced : error;
3663 }
3664
shmem_file_llseek(struct file * file,loff_t offset,int whence)3665 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3666 {
3667 struct address_space *mapping = file->f_mapping;
3668 struct inode *inode = mapping->host;
3669
3670 if (whence != SEEK_DATA && whence != SEEK_HOLE)
3671 return generic_file_llseek_size(file, offset, whence,
3672 MAX_LFS_FILESIZE, i_size_read(inode));
3673 if (offset < 0)
3674 return -ENXIO;
3675
3676 inode_lock(inode);
3677 /* We're holding i_rwsem so we can access i_size directly */
3678 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3679 if (offset >= 0)
3680 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3681 inode_unlock(inode);
3682 return offset;
3683 }
3684
shmem_fallocate(struct file * file,int mode,loff_t offset,loff_t len)3685 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3686 loff_t len)
3687 {
3688 struct inode *inode = file_inode(file);
3689 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3690 struct shmem_inode_info *info = SHMEM_I(inode);
3691 struct shmem_falloc shmem_falloc;
3692 pgoff_t start, index, end, undo_fallocend;
3693 int error;
3694
3695 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3696 return -EOPNOTSUPP;
3697
3698 inode_lock(inode);
3699
3700 if (info->flags & SHMEM_F_MAPPING_FROZEN) {
3701 error = -EPERM;
3702 goto out;
3703 }
3704
3705 if (mode & FALLOC_FL_PUNCH_HOLE) {
3706 struct address_space *mapping = file->f_mapping;
3707 loff_t unmap_start = round_up(offset, PAGE_SIZE);
3708 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3709 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3710
3711 /* protected by i_rwsem */
3712 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3713 error = -EPERM;
3714 goto out;
3715 }
3716
3717 shmem_falloc.waitq = &shmem_falloc_waitq;
3718 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3719 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3720 spin_lock(&inode->i_lock);
3721 inode->i_private = &shmem_falloc;
3722 spin_unlock(&inode->i_lock);
3723
3724 if ((u64)unmap_end > (u64)unmap_start)
3725 unmap_mapping_range(mapping, unmap_start,
3726 1 + unmap_end - unmap_start, 0);
3727 shmem_truncate_range(inode, offset, offset + len - 1);
3728 /* No need to unmap again: hole-punching leaves COWed pages */
3729
3730 spin_lock(&inode->i_lock);
3731 inode->i_private = NULL;
3732 wake_up_all(&shmem_falloc_waitq);
3733 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3734 spin_unlock(&inode->i_lock);
3735 error = 0;
3736 goto out;
3737 }
3738
3739 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3740 error = inode_newsize_ok(inode, offset + len);
3741 if (error)
3742 goto out;
3743
3744 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3745 error = -EPERM;
3746 goto out;
3747 }
3748
3749 start = offset >> PAGE_SHIFT;
3750 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3751 /* Try to avoid a swapstorm if len is impossible to satisfy */
3752 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3753 error = -ENOSPC;
3754 goto out;
3755 }
3756
3757 shmem_falloc.waitq = NULL;
3758 shmem_falloc.start = start;
3759 shmem_falloc.next = start;
3760 shmem_falloc.nr_falloced = 0;
3761 shmem_falloc.nr_unswapped = 0;
3762 spin_lock(&inode->i_lock);
3763 inode->i_private = &shmem_falloc;
3764 spin_unlock(&inode->i_lock);
3765
3766 /*
3767 * info->fallocend is only relevant when huge pages might be
3768 * involved: to prevent split_huge_page() freeing fallocated
3769 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3770 */
3771 undo_fallocend = info->fallocend;
3772 if (info->fallocend < end)
3773 info->fallocend = end;
3774
3775 for (index = start; index < end; ) {
3776 struct folio *folio;
3777
3778 /*
3779 * Check for fatal signal so that we abort early in OOM
3780 * situations. We don't want to abort in case of non-fatal
3781 * signals as large fallocate can take noticeable time and
3782 * e.g. periodic timers may result in fallocate constantly
3783 * restarting.
3784 */
3785 if (fatal_signal_pending(current))
3786 error = -EINTR;
3787 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3788 error = -ENOMEM;
3789 else
3790 error = shmem_get_folio(inode, index, offset + len,
3791 &folio, SGP_FALLOC);
3792 if (error) {
3793 info->fallocend = undo_fallocend;
3794 /* Remove the !uptodate folios we added */
3795 if (index > start) {
3796 shmem_undo_range(inode,
3797 (loff_t)start << PAGE_SHIFT,
3798 ((loff_t)index << PAGE_SHIFT) - 1, true);
3799 }
3800 goto undone;
3801 }
3802
3803 /*
3804 * Here is a more important optimization than it appears:
3805 * a second SGP_FALLOC on the same large folio will clear it,
3806 * making it uptodate and un-undoable if we fail later.
3807 */
3808 index = folio_next_index(folio);
3809 /* Beware 32-bit wraparound */
3810 if (!index)
3811 index--;
3812
3813 /*
3814 * Inform shmem_writeout() how far we have reached.
3815 * No need for lock or barrier: we have the page lock.
3816 */
3817 if (!folio_test_uptodate(folio))
3818 shmem_falloc.nr_falloced += index - shmem_falloc.next;
3819 shmem_falloc.next = index;
3820
3821 /*
3822 * If !uptodate, leave it that way so that freeable folios
3823 * can be recognized if we need to rollback on error later.
3824 * But mark it dirty so that memory pressure will swap rather
3825 * than free the folios we are allocating (and SGP_CACHE folios
3826 * might still be clean: we now need to mark those dirty too).
3827 */
3828 folio_mark_dirty(folio);
3829 folio_unlock(folio);
3830 folio_put(folio);
3831 cond_resched();
3832 }
3833
3834 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3835 i_size_write(inode, offset + len);
3836 undone:
3837 spin_lock(&inode->i_lock);
3838 inode->i_private = NULL;
3839 spin_unlock(&inode->i_lock);
3840 out:
3841 if (!error)
3842 file_modified(file);
3843 inode_unlock(inode);
3844 return error;
3845 }
3846
shmem_statfs(struct dentry * dentry,struct kstatfs * buf)3847 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3848 {
3849 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3850
3851 buf->f_type = TMPFS_MAGIC;
3852 buf->f_bsize = PAGE_SIZE;
3853 buf->f_namelen = NAME_MAX;
3854 if (sbinfo->max_blocks) {
3855 buf->f_blocks = sbinfo->max_blocks;
3856 buf->f_bavail =
3857 buf->f_bfree = sbinfo->max_blocks -
3858 percpu_counter_sum(&sbinfo->used_blocks);
3859 }
3860 if (sbinfo->max_inodes) {
3861 buf->f_files = sbinfo->max_inodes;
3862 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3863 }
3864 /* else leave those fields 0 like simple_statfs */
3865
3866 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3867
3868 return 0;
3869 }
3870
3871 /*
3872 * File creation. Allocate an inode, and we're done..
3873 */
3874 static int
shmem_mknod(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,dev_t dev)3875 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3876 struct dentry *dentry, umode_t mode, dev_t dev)
3877 {
3878 struct inode *inode;
3879 int error;
3880
3881 if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
3882 return -EINVAL;
3883
3884 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev,
3885 mk_vma_flags(VMA_NORESERVE_BIT));
3886 if (IS_ERR(inode))
3887 return PTR_ERR(inode);
3888
3889 error = simple_acl_create(dir, inode);
3890 if (error)
3891 goto out_iput;
3892 error = security_inode_init_security(inode, dir, &dentry->d_name,
3893 shmem_initxattrs, NULL);
3894 if (error && error != -EOPNOTSUPP)
3895 goto out_iput;
3896
3897 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3898 if (error)
3899 goto out_iput;
3900
3901 dir->i_size += BOGO_DIRENT_SIZE;
3902 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3903 inode_inc_iversion(dir);
3904
3905 d_make_persistent(dentry, inode);
3906 return error;
3907
3908 out_iput:
3909 iput(inode);
3910 return error;
3911 }
3912
3913 static int
shmem_tmpfile(struct mnt_idmap * idmap,struct inode * dir,struct file * file,umode_t mode)3914 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3915 struct file *file, umode_t mode)
3916 {
3917 struct inode *inode;
3918 int error;
3919
3920 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0,
3921 mk_vma_flags(VMA_NORESERVE_BIT));
3922 if (IS_ERR(inode)) {
3923 error = PTR_ERR(inode);
3924 goto err_out;
3925 }
3926 error = security_inode_init_security(inode, dir, NULL,
3927 shmem_initxattrs, NULL);
3928 if (error && error != -EOPNOTSUPP)
3929 goto out_iput;
3930 error = simple_acl_create(dir, inode);
3931 if (error)
3932 goto out_iput;
3933 d_tmpfile(file, inode);
3934
3935 err_out:
3936 return finish_open_simple(file, error);
3937 out_iput:
3938 iput(inode);
3939 return error;
3940 }
3941
shmem_mkdir(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode)3942 static struct dentry *shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3943 struct dentry *dentry, umode_t mode)
3944 {
3945 int error;
3946
3947 error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3948 if (error)
3949 return ERR_PTR(error);
3950 inc_nlink(dir);
3951 return NULL;
3952 }
3953
shmem_create(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,bool excl)3954 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3955 struct dentry *dentry, umode_t mode, bool excl)
3956 {
3957 return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3958 }
3959
3960 /*
3961 * Link a file..
3962 */
shmem_link(struct dentry * old_dentry,struct inode * dir,struct dentry * dentry)3963 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3964 struct dentry *dentry)
3965 {
3966 struct inode *inode = d_inode(old_dentry);
3967 int ret;
3968
3969 /*
3970 * No ordinary (disk based) filesystem counts links as inodes;
3971 * but each new link needs a new dentry, pinning lowmem, and
3972 * tmpfs dentries cannot be pruned until they are unlinked.
3973 * But if an O_TMPFILE file is linked into the tmpfs, the
3974 * first link must skip that, to get the accounting right.
3975 */
3976 if (inode->i_nlink) {
3977 ret = shmem_reserve_inode(inode->i_sb, NULL);
3978 if (ret)
3979 return ret;
3980 }
3981
3982 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3983 if (ret) {
3984 if (inode->i_nlink)
3985 shmem_free_inode(inode->i_sb, 0);
3986 return ret;
3987 }
3988
3989 dir->i_size += BOGO_DIRENT_SIZE;
3990 inode_inc_iversion(dir);
3991 return simple_link(old_dentry, dir, dentry);
3992 }
3993
shmem_unlink(struct inode * dir,struct dentry * dentry)3994 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3995 {
3996 struct inode *inode = d_inode(dentry);
3997
3998 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3999 shmem_free_inode(inode->i_sb, 0);
4000
4001 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
4002
4003 dir->i_size -= BOGO_DIRENT_SIZE;
4004 inode_inc_iversion(dir);
4005 simple_unlink(dir, dentry);
4006
4007 /*
4008 * For now, VFS can't deal with case-insensitive negative dentries, so
4009 * we invalidate them
4010 */
4011 if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
4012 d_invalidate(dentry);
4013
4014 return 0;
4015 }
4016
shmem_rmdir(struct inode * dir,struct dentry * dentry)4017 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
4018 {
4019 if (!simple_empty(dentry))
4020 return -ENOTEMPTY;
4021
4022 drop_nlink(d_inode(dentry));
4023 drop_nlink(dir);
4024 return shmem_unlink(dir, dentry);
4025 }
4026
shmem_whiteout(struct mnt_idmap * idmap,struct inode * old_dir,struct dentry * old_dentry)4027 static int shmem_whiteout(struct mnt_idmap *idmap,
4028 struct inode *old_dir, struct dentry *old_dentry)
4029 {
4030 struct dentry *whiteout;
4031 int error;
4032
4033 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
4034 if (!whiteout)
4035 return -ENOMEM;
4036 error = shmem_mknod(idmap, old_dir, whiteout,
4037 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
4038 dput(whiteout);
4039 return error;
4040 }
4041
4042 /*
4043 * The VFS layer already does all the dentry stuff for rename,
4044 * we just have to decrement the usage count for the target if
4045 * it exists so that the VFS layer correctly free's it when it
4046 * gets overwritten.
4047 */
shmem_rename2(struct mnt_idmap * idmap,struct inode * old_dir,struct dentry * old_dentry,struct inode * new_dir,struct dentry * new_dentry,unsigned int flags)4048 static int shmem_rename2(struct mnt_idmap *idmap,
4049 struct inode *old_dir, struct dentry *old_dentry,
4050 struct inode *new_dir, struct dentry *new_dentry,
4051 unsigned int flags)
4052 {
4053 struct inode *inode = d_inode(old_dentry);
4054 int they_are_dirs = S_ISDIR(inode->i_mode);
4055 bool had_offset = false;
4056 int error;
4057
4058 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
4059 return -EINVAL;
4060
4061 if (flags & RENAME_EXCHANGE)
4062 return simple_offset_rename_exchange(old_dir, old_dentry,
4063 new_dir, new_dentry);
4064
4065 if (!simple_empty(new_dentry))
4066 return -ENOTEMPTY;
4067
4068 error = simple_offset_add(shmem_get_offset_ctx(new_dir), new_dentry);
4069 if (error == -EBUSY)
4070 had_offset = true;
4071 else if (unlikely(error))
4072 return error;
4073
4074 if (flags & RENAME_WHITEOUT) {
4075 error = shmem_whiteout(idmap, old_dir, old_dentry);
4076 if (error) {
4077 if (!had_offset)
4078 simple_offset_remove(shmem_get_offset_ctx(new_dir),
4079 new_dentry);
4080 return error;
4081 }
4082 }
4083
4084 simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
4085 if (d_really_is_positive(new_dentry)) {
4086 (void) shmem_unlink(new_dir, new_dentry);
4087 if (they_are_dirs) {
4088 drop_nlink(d_inode(new_dentry));
4089 drop_nlink(old_dir);
4090 }
4091 } else if (they_are_dirs) {
4092 drop_nlink(old_dir);
4093 inc_nlink(new_dir);
4094 }
4095
4096 old_dir->i_size -= BOGO_DIRENT_SIZE;
4097 new_dir->i_size += BOGO_DIRENT_SIZE;
4098 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
4099 inode_inc_iversion(old_dir);
4100 inode_inc_iversion(new_dir);
4101 return 0;
4102 }
4103
shmem_symlink(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,const char * symname)4104 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
4105 struct dentry *dentry, const char *symname)
4106 {
4107 int error;
4108 int len;
4109 struct inode *inode;
4110 struct folio *folio;
4111 char *link;
4112
4113 len = strlen(symname) + 1;
4114 if (len > PAGE_SIZE)
4115 return -ENAMETOOLONG;
4116
4117 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
4118 mk_vma_flags(VMA_NORESERVE_BIT));
4119 if (IS_ERR(inode))
4120 return PTR_ERR(inode);
4121
4122 error = security_inode_init_security(inode, dir, &dentry->d_name,
4123 shmem_initxattrs, NULL);
4124 if (error && error != -EOPNOTSUPP)
4125 goto out_iput;
4126
4127 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
4128 if (error)
4129 goto out_iput;
4130
4131 inode->i_size = len-1;
4132 if (len <= SHORT_SYMLINK_LEN) {
4133 link = kmemdup(symname, len, GFP_KERNEL);
4134 if (!link) {
4135 error = -ENOMEM;
4136 goto out_remove_offset;
4137 }
4138 inode->i_op = &shmem_short_symlink_operations;
4139 inode_set_cached_link(inode, link, len - 1);
4140 } else {
4141 inode_nohighmem(inode);
4142 inode->i_mapping->a_ops = &shmem_aops;
4143 error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
4144 if (error)
4145 goto out_remove_offset;
4146 inode->i_op = &shmem_symlink_inode_operations;
4147 memcpy(folio_address(folio), symname, len);
4148 folio_mark_uptodate(folio);
4149 folio_mark_dirty(folio);
4150 folio_unlock(folio);
4151 folio_put(folio);
4152 }
4153 dir->i_size += BOGO_DIRENT_SIZE;
4154 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
4155 inode_inc_iversion(dir);
4156 d_make_persistent(dentry, inode);
4157 return 0;
4158
4159 out_remove_offset:
4160 simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
4161 out_iput:
4162 iput(inode);
4163 return error;
4164 }
4165
shmem_put_link(void * arg)4166 static void shmem_put_link(void *arg)
4167 {
4168 folio_mark_accessed(arg);
4169 folio_put(arg);
4170 }
4171
shmem_get_link(struct dentry * dentry,struct inode * inode,struct delayed_call * done)4172 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
4173 struct delayed_call *done)
4174 {
4175 struct folio *folio = NULL;
4176 int error;
4177
4178 if (!dentry) {
4179 folio = filemap_get_folio(inode->i_mapping, 0);
4180 if (IS_ERR(folio))
4181 return ERR_PTR(-ECHILD);
4182 if (PageHWPoison(folio_page(folio, 0)) ||
4183 !folio_test_uptodate(folio)) {
4184 folio_put(folio);
4185 return ERR_PTR(-ECHILD);
4186 }
4187 } else {
4188 error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
4189 if (error)
4190 return ERR_PTR(error);
4191 if (!folio)
4192 return ERR_PTR(-ECHILD);
4193 if (PageHWPoison(folio_page(folio, 0))) {
4194 folio_unlock(folio);
4195 folio_put(folio);
4196 return ERR_PTR(-ECHILD);
4197 }
4198 folio_unlock(folio);
4199 }
4200 set_delayed_call(done, shmem_put_link, folio);
4201 return folio_address(folio);
4202 }
4203
4204 #ifdef CONFIG_TMPFS_XATTR
4205
shmem_fileattr_get(struct dentry * dentry,struct file_kattr * fa)4206 static int shmem_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
4207 {
4208 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4209
4210 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
4211
4212 return 0;
4213 }
4214
shmem_fileattr_set(struct mnt_idmap * idmap,struct dentry * dentry,struct file_kattr * fa)4215 static int shmem_fileattr_set(struct mnt_idmap *idmap,
4216 struct dentry *dentry, struct file_kattr *fa)
4217 {
4218 struct inode *inode = d_inode(dentry);
4219 struct shmem_inode_info *info = SHMEM_I(inode);
4220 int ret, flags;
4221
4222 if (fileattr_has_fsx(fa))
4223 return -EOPNOTSUPP;
4224 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
4225 return -EOPNOTSUPP;
4226
4227 flags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
4228 (fa->flags & SHMEM_FL_USER_MODIFIABLE);
4229
4230 ret = shmem_set_inode_flags(inode, flags, dentry);
4231
4232 if (ret)
4233 return ret;
4234
4235 info->fsflags = flags;
4236
4237 inode_set_ctime_current(inode);
4238 inode_inc_iversion(inode);
4239 return 0;
4240 }
4241
4242 /*
4243 * Superblocks without xattr inode operations may get some security.* xattr
4244 * support from the LSM "for free". As soon as we have any other xattrs
4245 * like ACLs, we also need to implement the security.* handlers at
4246 * filesystem level, though.
4247 */
4248
4249 /*
4250 * Callback for security_inode_init_security() for acquiring xattrs.
4251 */
shmem_initxattrs(struct inode * inode,const struct xattr * xattr_array,void * fs_info)4252 static int shmem_initxattrs(struct inode *inode,
4253 const struct xattr *xattr_array, void *fs_info)
4254 {
4255 struct shmem_inode_info *info = SHMEM_I(inode);
4256 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4257 const struct xattr *xattr;
4258 struct simple_xattr *new_xattr;
4259 size_t ispace = 0;
4260 size_t len;
4261
4262 if (sbinfo->max_inodes) {
4263 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4264 ispace += simple_xattr_space(xattr->name,
4265 xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
4266 }
4267 if (ispace) {
4268 raw_spin_lock(&sbinfo->stat_lock);
4269 if (sbinfo->free_ispace < ispace)
4270 ispace = 0;
4271 else
4272 sbinfo->free_ispace -= ispace;
4273 raw_spin_unlock(&sbinfo->stat_lock);
4274 if (!ispace)
4275 return -ENOSPC;
4276 }
4277 }
4278
4279 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4280 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
4281 if (!new_xattr)
4282 break;
4283
4284 len = strlen(xattr->name) + 1;
4285 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
4286 GFP_KERNEL_ACCOUNT);
4287 if (!new_xattr->name) {
4288 kvfree(new_xattr);
4289 break;
4290 }
4291
4292 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
4293 XATTR_SECURITY_PREFIX_LEN);
4294 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
4295 xattr->name, len);
4296
4297 simple_xattr_add(&info->xattrs, new_xattr);
4298 }
4299
4300 if (xattr->name != NULL) {
4301 if (ispace) {
4302 raw_spin_lock(&sbinfo->stat_lock);
4303 sbinfo->free_ispace += ispace;
4304 raw_spin_unlock(&sbinfo->stat_lock);
4305 }
4306 simple_xattrs_free(&info->xattrs, NULL);
4307 return -ENOMEM;
4308 }
4309
4310 return 0;
4311 }
4312
shmem_xattr_handler_get(const struct xattr_handler * handler,struct dentry * unused,struct inode * inode,const char * name,void * buffer,size_t size)4313 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
4314 struct dentry *unused, struct inode *inode,
4315 const char *name, void *buffer, size_t size)
4316 {
4317 struct shmem_inode_info *info = SHMEM_I(inode);
4318
4319 name = xattr_full_name(handler, name);
4320 return simple_xattr_get(&info->xattrs, name, buffer, size);
4321 }
4322
shmem_xattr_handler_set(const struct xattr_handler * handler,struct mnt_idmap * idmap,struct dentry * unused,struct inode * inode,const char * name,const void * value,size_t size,int flags)4323 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4324 struct mnt_idmap *idmap,
4325 struct dentry *unused, struct inode *inode,
4326 const char *name, const void *value,
4327 size_t size, int flags)
4328 {
4329 struct shmem_inode_info *info = SHMEM_I(inode);
4330 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4331 struct simple_xattr *old_xattr;
4332 size_t ispace = 0;
4333
4334 name = xattr_full_name(handler, name);
4335 if (value && sbinfo->max_inodes) {
4336 ispace = simple_xattr_space(name, size);
4337 raw_spin_lock(&sbinfo->stat_lock);
4338 if (sbinfo->free_ispace < ispace)
4339 ispace = 0;
4340 else
4341 sbinfo->free_ispace -= ispace;
4342 raw_spin_unlock(&sbinfo->stat_lock);
4343 if (!ispace)
4344 return -ENOSPC;
4345 }
4346
4347 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
4348 if (!IS_ERR(old_xattr)) {
4349 ispace = 0;
4350 if (old_xattr && sbinfo->max_inodes)
4351 ispace = simple_xattr_space(old_xattr->name,
4352 old_xattr->size);
4353 simple_xattr_free(old_xattr);
4354 old_xattr = NULL;
4355 inode_set_ctime_current(inode);
4356 inode_inc_iversion(inode);
4357 }
4358 if (ispace) {
4359 raw_spin_lock(&sbinfo->stat_lock);
4360 sbinfo->free_ispace += ispace;
4361 raw_spin_unlock(&sbinfo->stat_lock);
4362 }
4363 return PTR_ERR(old_xattr);
4364 }
4365
4366 static const struct xattr_handler shmem_security_xattr_handler = {
4367 .prefix = XATTR_SECURITY_PREFIX,
4368 .get = shmem_xattr_handler_get,
4369 .set = shmem_xattr_handler_set,
4370 };
4371
4372 static const struct xattr_handler shmem_trusted_xattr_handler = {
4373 .prefix = XATTR_TRUSTED_PREFIX,
4374 .get = shmem_xattr_handler_get,
4375 .set = shmem_xattr_handler_set,
4376 };
4377
4378 static const struct xattr_handler shmem_user_xattr_handler = {
4379 .prefix = XATTR_USER_PREFIX,
4380 .get = shmem_xattr_handler_get,
4381 .set = shmem_xattr_handler_set,
4382 };
4383
4384 static const struct xattr_handler * const shmem_xattr_handlers[] = {
4385 &shmem_security_xattr_handler,
4386 &shmem_trusted_xattr_handler,
4387 &shmem_user_xattr_handler,
4388 NULL
4389 };
4390
shmem_listxattr(struct dentry * dentry,char * buffer,size_t size)4391 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4392 {
4393 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4394 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
4395 }
4396 #endif /* CONFIG_TMPFS_XATTR */
4397
4398 static const struct inode_operations shmem_short_symlink_operations = {
4399 .getattr = shmem_getattr,
4400 .setattr = shmem_setattr,
4401 .get_link = simple_get_link,
4402 #ifdef CONFIG_TMPFS_XATTR
4403 .listxattr = shmem_listxattr,
4404 #endif
4405 };
4406
4407 static const struct inode_operations shmem_symlink_inode_operations = {
4408 .getattr = shmem_getattr,
4409 .setattr = shmem_setattr,
4410 .get_link = shmem_get_link,
4411 #ifdef CONFIG_TMPFS_XATTR
4412 .listxattr = shmem_listxattr,
4413 #endif
4414 };
4415
shmem_get_parent(struct dentry * child)4416 static struct dentry *shmem_get_parent(struct dentry *child)
4417 {
4418 return ERR_PTR(-ESTALE);
4419 }
4420
shmem_match(struct inode * ino,void * vfh)4421 static int shmem_match(struct inode *ino, void *vfh)
4422 {
4423 __u32 *fh = vfh;
4424 __u64 inum = fh[2];
4425 inum = (inum << 32) | fh[1];
4426 return ino->i_ino == inum && fh[0] == ino->i_generation;
4427 }
4428
4429 /* Find any alias of inode, but prefer a hashed alias */
shmem_find_alias(struct inode * inode)4430 static struct dentry *shmem_find_alias(struct inode *inode)
4431 {
4432 struct dentry *alias = d_find_alias(inode);
4433
4434 return alias ?: d_find_any_alias(inode);
4435 }
4436
shmem_fh_to_dentry(struct super_block * sb,struct fid * fid,int fh_len,int fh_type)4437 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4438 struct fid *fid, int fh_len, int fh_type)
4439 {
4440 struct inode *inode;
4441 struct dentry *dentry = NULL;
4442 u64 inum;
4443
4444 if (fh_len < 3)
4445 return NULL;
4446
4447 inum = fid->raw[2];
4448 inum = (inum << 32) | fid->raw[1];
4449
4450 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4451 shmem_match, fid->raw);
4452 if (inode) {
4453 dentry = shmem_find_alias(inode);
4454 iput(inode);
4455 }
4456
4457 return dentry;
4458 }
4459
shmem_encode_fh(struct inode * inode,__u32 * fh,int * len,struct inode * parent)4460 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4461 struct inode *parent)
4462 {
4463 if (*len < 3) {
4464 *len = 3;
4465 return FILEID_INVALID;
4466 }
4467
4468 if (inode_unhashed(inode)) {
4469 /* Unfortunately insert_inode_hash is not idempotent,
4470 * so as we hash inodes here rather than at creation
4471 * time, we need a lock to ensure we only try
4472 * to do it once
4473 */
4474 static DEFINE_SPINLOCK(lock);
4475 spin_lock(&lock);
4476 if (inode_unhashed(inode))
4477 __insert_inode_hash(inode,
4478 inode->i_ino + inode->i_generation);
4479 spin_unlock(&lock);
4480 }
4481
4482 fh[0] = inode->i_generation;
4483 fh[1] = inode->i_ino;
4484 fh[2] = ((__u64)inode->i_ino) >> 32;
4485
4486 *len = 3;
4487 return 1;
4488 }
4489
4490 static const struct export_operations shmem_export_ops = {
4491 .get_parent = shmem_get_parent,
4492 .encode_fh = shmem_encode_fh,
4493 .fh_to_dentry = shmem_fh_to_dentry,
4494 };
4495
4496 enum shmem_param {
4497 Opt_gid,
4498 Opt_huge,
4499 Opt_mode,
4500 Opt_mpol,
4501 Opt_nr_blocks,
4502 Opt_nr_inodes,
4503 Opt_size,
4504 Opt_uid,
4505 Opt_inode32,
4506 Opt_inode64,
4507 Opt_noswap,
4508 Opt_quota,
4509 Opt_usrquota,
4510 Opt_grpquota,
4511 Opt_usrquota_block_hardlimit,
4512 Opt_usrquota_inode_hardlimit,
4513 Opt_grpquota_block_hardlimit,
4514 Opt_grpquota_inode_hardlimit,
4515 Opt_casefold_version,
4516 Opt_casefold,
4517 Opt_strict_encoding,
4518 };
4519
4520 static const struct constant_table shmem_param_enums_huge[] = {
4521 {"never", SHMEM_HUGE_NEVER },
4522 {"always", SHMEM_HUGE_ALWAYS },
4523 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
4524 {"advise", SHMEM_HUGE_ADVISE },
4525 {}
4526 };
4527
4528 const struct fs_parameter_spec shmem_fs_parameters[] = {
4529 fsparam_gid ("gid", Opt_gid),
4530 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
4531 fsparam_u32oct("mode", Opt_mode),
4532 fsparam_string("mpol", Opt_mpol),
4533 fsparam_string("nr_blocks", Opt_nr_blocks),
4534 fsparam_string("nr_inodes", Opt_nr_inodes),
4535 fsparam_string("size", Opt_size),
4536 fsparam_uid ("uid", Opt_uid),
4537 fsparam_flag ("inode32", Opt_inode32),
4538 fsparam_flag ("inode64", Opt_inode64),
4539 fsparam_flag ("noswap", Opt_noswap),
4540 #ifdef CONFIG_TMPFS_QUOTA
4541 fsparam_flag ("quota", Opt_quota),
4542 fsparam_flag ("usrquota", Opt_usrquota),
4543 fsparam_flag ("grpquota", Opt_grpquota),
4544 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4545 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4546 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4547 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4548 #endif
4549 fsparam_string("casefold", Opt_casefold_version),
4550 fsparam_flag ("casefold", Opt_casefold),
4551 fsparam_flag ("strict_encoding", Opt_strict_encoding),
4552 {}
4553 };
4554
4555 #if IS_ENABLED(CONFIG_UNICODE)
shmem_parse_opt_casefold(struct fs_context * fc,struct fs_parameter * param,bool latest_version)4556 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4557 bool latest_version)
4558 {
4559 struct shmem_options *ctx = fc->fs_private;
4560 int version = UTF8_LATEST;
4561 struct unicode_map *encoding;
4562 char *version_str = param->string + 5;
4563
4564 if (!latest_version) {
4565 if (strncmp(param->string, "utf8-", 5))
4566 return invalfc(fc, "Only UTF-8 encodings are supported "
4567 "in the format: utf8-<version number>");
4568
4569 version = utf8_parse_version(version_str);
4570 if (version < 0)
4571 return invalfc(fc, "Invalid UTF-8 version: %s", version_str);
4572 }
4573
4574 encoding = utf8_load(version);
4575
4576 if (IS_ERR(encoding)) {
4577 return invalfc(fc, "Failed loading UTF-8 version: utf8-%u.%u.%u\n",
4578 unicode_major(version), unicode_minor(version),
4579 unicode_rev(version));
4580 }
4581
4582 pr_info("tmpfs: Using encoding : utf8-%u.%u.%u\n",
4583 unicode_major(version), unicode_minor(version), unicode_rev(version));
4584
4585 ctx->encoding = encoding;
4586
4587 return 0;
4588 }
4589 #else
shmem_parse_opt_casefold(struct fs_context * fc,struct fs_parameter * param,bool latest_version)4590 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4591 bool latest_version)
4592 {
4593 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4594 }
4595 #endif
4596
shmem_parse_one(struct fs_context * fc,struct fs_parameter * param)4597 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4598 {
4599 struct shmem_options *ctx = fc->fs_private;
4600 struct fs_parse_result result;
4601 unsigned long long size;
4602 char *rest;
4603 int opt;
4604 kuid_t kuid;
4605 kgid_t kgid;
4606
4607 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4608 if (opt < 0)
4609 return opt;
4610
4611 switch (opt) {
4612 case Opt_size:
4613 size = memparse(param->string, &rest);
4614 if (*rest == '%') {
4615 size <<= PAGE_SHIFT;
4616 size *= totalram_pages();
4617 do_div(size, 100);
4618 rest++;
4619 }
4620 if (*rest)
4621 goto bad_value;
4622 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4623 ctx->seen |= SHMEM_SEEN_BLOCKS;
4624 break;
4625 case Opt_nr_blocks:
4626 ctx->blocks = memparse(param->string, &rest);
4627 if (*rest || ctx->blocks > LONG_MAX)
4628 goto bad_value;
4629 ctx->seen |= SHMEM_SEEN_BLOCKS;
4630 break;
4631 case Opt_nr_inodes:
4632 ctx->inodes = memparse(param->string, &rest);
4633 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4634 goto bad_value;
4635 ctx->seen |= SHMEM_SEEN_INODES;
4636 break;
4637 case Opt_mode:
4638 ctx->mode = result.uint_32 & 07777;
4639 break;
4640 case Opt_uid:
4641 kuid = result.uid;
4642
4643 /*
4644 * The requested uid must be representable in the
4645 * filesystem's idmapping.
4646 */
4647 if (!kuid_has_mapping(fc->user_ns, kuid))
4648 goto bad_value;
4649
4650 ctx->uid = kuid;
4651 break;
4652 case Opt_gid:
4653 kgid = result.gid;
4654
4655 /*
4656 * The requested gid must be representable in the
4657 * filesystem's idmapping.
4658 */
4659 if (!kgid_has_mapping(fc->user_ns, kgid))
4660 goto bad_value;
4661
4662 ctx->gid = kgid;
4663 break;
4664 case Opt_huge:
4665 ctx->huge = result.uint_32;
4666 if (ctx->huge != SHMEM_HUGE_NEVER &&
4667 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4668 has_transparent_hugepage()))
4669 goto unsupported_parameter;
4670 ctx->seen |= SHMEM_SEEN_HUGE;
4671 break;
4672 case Opt_mpol:
4673 if (IS_ENABLED(CONFIG_NUMA)) {
4674 mpol_put(ctx->mpol);
4675 ctx->mpol = NULL;
4676 if (mpol_parse_str(param->string, &ctx->mpol))
4677 goto bad_value;
4678 break;
4679 }
4680 goto unsupported_parameter;
4681 case Opt_inode32:
4682 ctx->full_inums = false;
4683 ctx->seen |= SHMEM_SEEN_INUMS;
4684 break;
4685 case Opt_inode64:
4686 if (sizeof(ino_t) < 8) {
4687 return invalfc(fc,
4688 "Cannot use inode64 with <64bit inums in kernel\n");
4689 }
4690 ctx->full_inums = true;
4691 ctx->seen |= SHMEM_SEEN_INUMS;
4692 break;
4693 case Opt_noswap:
4694 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4695 return invalfc(fc,
4696 "Turning off swap in unprivileged tmpfs mounts unsupported");
4697 }
4698 ctx->noswap = true;
4699 break;
4700 case Opt_quota:
4701 if (fc->user_ns != &init_user_ns)
4702 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4703 ctx->seen |= SHMEM_SEEN_QUOTA;
4704 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4705 break;
4706 case Opt_usrquota:
4707 if (fc->user_ns != &init_user_ns)
4708 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4709 ctx->seen |= SHMEM_SEEN_QUOTA;
4710 ctx->quota_types |= QTYPE_MASK_USR;
4711 break;
4712 case Opt_grpquota:
4713 if (fc->user_ns != &init_user_ns)
4714 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4715 ctx->seen |= SHMEM_SEEN_QUOTA;
4716 ctx->quota_types |= QTYPE_MASK_GRP;
4717 break;
4718 case Opt_usrquota_block_hardlimit:
4719 size = memparse(param->string, &rest);
4720 if (*rest || !size)
4721 goto bad_value;
4722 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4723 return invalfc(fc,
4724 "User quota block hardlimit too large.");
4725 ctx->qlimits.usrquota_bhardlimit = size;
4726 break;
4727 case Opt_grpquota_block_hardlimit:
4728 size = memparse(param->string, &rest);
4729 if (*rest || !size)
4730 goto bad_value;
4731 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4732 return invalfc(fc,
4733 "Group quota block hardlimit too large.");
4734 ctx->qlimits.grpquota_bhardlimit = size;
4735 break;
4736 case Opt_usrquota_inode_hardlimit:
4737 size = memparse(param->string, &rest);
4738 if (*rest || !size)
4739 goto bad_value;
4740 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4741 return invalfc(fc,
4742 "User quota inode hardlimit too large.");
4743 ctx->qlimits.usrquota_ihardlimit = size;
4744 break;
4745 case Opt_grpquota_inode_hardlimit:
4746 size = memparse(param->string, &rest);
4747 if (*rest || !size)
4748 goto bad_value;
4749 if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4750 return invalfc(fc,
4751 "Group quota inode hardlimit too large.");
4752 ctx->qlimits.grpquota_ihardlimit = size;
4753 break;
4754 case Opt_casefold_version:
4755 return shmem_parse_opt_casefold(fc, param, false);
4756 case Opt_casefold:
4757 return shmem_parse_opt_casefold(fc, param, true);
4758 case Opt_strict_encoding:
4759 #if IS_ENABLED(CONFIG_UNICODE)
4760 ctx->strict_encoding = true;
4761 break;
4762 #else
4763 return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4764 #endif
4765 }
4766 return 0;
4767
4768 unsupported_parameter:
4769 return invalfc(fc, "Unsupported parameter '%s'", param->key);
4770 bad_value:
4771 return invalfc(fc, "Bad value for '%s'", param->key);
4772 }
4773
shmem_next_opt(char ** s)4774 static char *shmem_next_opt(char **s)
4775 {
4776 char *sbegin = *s;
4777 char *p;
4778
4779 if (sbegin == NULL)
4780 return NULL;
4781
4782 /*
4783 * NUL-terminate this option: unfortunately,
4784 * mount options form a comma-separated list,
4785 * but mpol's nodelist may also contain commas.
4786 */
4787 for (;;) {
4788 p = strchr(*s, ',');
4789 if (p == NULL)
4790 break;
4791 *s = p + 1;
4792 if (!isdigit(*(p+1))) {
4793 *p = '\0';
4794 return sbegin;
4795 }
4796 }
4797
4798 *s = NULL;
4799 return sbegin;
4800 }
4801
shmem_parse_monolithic(struct fs_context * fc,void * data)4802 static int shmem_parse_monolithic(struct fs_context *fc, void *data)
4803 {
4804 return vfs_parse_monolithic_sep(fc, data, shmem_next_opt);
4805 }
4806
4807 /*
4808 * Reconfigure a shmem filesystem.
4809 */
shmem_reconfigure(struct fs_context * fc)4810 static int shmem_reconfigure(struct fs_context *fc)
4811 {
4812 struct shmem_options *ctx = fc->fs_private;
4813 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4814 unsigned long used_isp;
4815 struct mempolicy *mpol = NULL;
4816 const char *err;
4817
4818 raw_spin_lock(&sbinfo->stat_lock);
4819 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4820
4821 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4822 if (!sbinfo->max_blocks) {
4823 err = "Cannot retroactively limit size";
4824 goto out;
4825 }
4826 if (percpu_counter_compare(&sbinfo->used_blocks,
4827 ctx->blocks) > 0) {
4828 err = "Too small a size for current use";
4829 goto out;
4830 }
4831 }
4832 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4833 if (!sbinfo->max_inodes) {
4834 err = "Cannot retroactively limit inodes";
4835 goto out;
4836 }
4837 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4838 err = "Too few inodes for current use";
4839 goto out;
4840 }
4841 }
4842
4843 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4844 sbinfo->next_ino > UINT_MAX) {
4845 err = "Current inum too high to switch to 32-bit inums";
4846 goto out;
4847 }
4848
4849 /*
4850 * "noswap" doesn't use fsparam_flag_no, i.e. there's no "swap"
4851 * counterpart for (re-)enabling swap.
4852 */
4853 if (ctx->noswap && !sbinfo->noswap) {
4854 err = "Cannot disable swap on remount";
4855 goto out;
4856 }
4857
4858 if (ctx->seen & SHMEM_SEEN_QUOTA &&
4859 !sb_any_quota_loaded(fc->root->d_sb)) {
4860 err = "Cannot enable quota on remount";
4861 goto out;
4862 }
4863
4864 #ifdef CONFIG_TMPFS_QUOTA
4865 #define CHANGED_LIMIT(name) \
4866 (ctx->qlimits.name## hardlimit && \
4867 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4868
4869 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4870 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4871 err = "Cannot change global quota limit on remount";
4872 goto out;
4873 }
4874 #endif /* CONFIG_TMPFS_QUOTA */
4875
4876 if (ctx->seen & SHMEM_SEEN_HUGE)
4877 sbinfo->huge = ctx->huge;
4878 if (ctx->seen & SHMEM_SEEN_INUMS)
4879 sbinfo->full_inums = ctx->full_inums;
4880 if (ctx->seen & SHMEM_SEEN_BLOCKS)
4881 sbinfo->max_blocks = ctx->blocks;
4882 if (ctx->seen & SHMEM_SEEN_INODES) {
4883 sbinfo->max_inodes = ctx->inodes;
4884 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4885 }
4886
4887 /*
4888 * Preserve previous mempolicy unless mpol remount option was specified.
4889 */
4890 if (ctx->mpol) {
4891 mpol = sbinfo->mpol;
4892 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
4893 ctx->mpol = NULL;
4894 }
4895
4896 if (ctx->noswap)
4897 sbinfo->noswap = true;
4898
4899 raw_spin_unlock(&sbinfo->stat_lock);
4900 mpol_put(mpol);
4901 return 0;
4902 out:
4903 raw_spin_unlock(&sbinfo->stat_lock);
4904 return invalfc(fc, "%s", err);
4905 }
4906
shmem_show_options(struct seq_file * seq,struct dentry * root)4907 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4908 {
4909 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4910 struct mempolicy *mpol;
4911
4912 if (sbinfo->max_blocks != shmem_default_max_blocks())
4913 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4914 if (sbinfo->max_inodes != shmem_default_max_inodes())
4915 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4916 if (sbinfo->mode != (0777 | S_ISVTX))
4917 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4918 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4919 seq_printf(seq, ",uid=%u",
4920 from_kuid_munged(&init_user_ns, sbinfo->uid));
4921 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4922 seq_printf(seq, ",gid=%u",
4923 from_kgid_munged(&init_user_ns, sbinfo->gid));
4924
4925 /*
4926 * Showing inode{64,32} might be useful even if it's the system default,
4927 * since then people don't have to resort to checking both here and
4928 * /proc/config.gz to confirm 64-bit inums were successfully applied
4929 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4930 *
4931 * We hide it when inode64 isn't the default and we are using 32-bit
4932 * inodes, since that probably just means the feature isn't even under
4933 * consideration.
4934 *
4935 * As such:
4936 *
4937 * +-----------------+-----------------+
4938 * | TMPFS_INODE64=y | TMPFS_INODE64=n |
4939 * +------------------+-----------------+-----------------+
4940 * | full_inums=true | show | show |
4941 * | full_inums=false | show | hide |
4942 * +------------------+-----------------+-----------------+
4943 *
4944 */
4945 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4946 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4947 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4948 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4949 if (sbinfo->huge)
4950 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4951 #endif
4952 mpol = shmem_get_sbmpol(sbinfo);
4953 shmem_show_mpol(seq, mpol);
4954 mpol_put(mpol);
4955 if (sbinfo->noswap)
4956 seq_printf(seq, ",noswap");
4957 #ifdef CONFIG_TMPFS_QUOTA
4958 if (sb_has_quota_active(root->d_sb, USRQUOTA))
4959 seq_printf(seq, ",usrquota");
4960 if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4961 seq_printf(seq, ",grpquota");
4962 if (sbinfo->qlimits.usrquota_bhardlimit)
4963 seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4964 sbinfo->qlimits.usrquota_bhardlimit);
4965 if (sbinfo->qlimits.grpquota_bhardlimit)
4966 seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4967 sbinfo->qlimits.grpquota_bhardlimit);
4968 if (sbinfo->qlimits.usrquota_ihardlimit)
4969 seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4970 sbinfo->qlimits.usrquota_ihardlimit);
4971 if (sbinfo->qlimits.grpquota_ihardlimit)
4972 seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4973 sbinfo->qlimits.grpquota_ihardlimit);
4974 #endif
4975 return 0;
4976 }
4977
4978 #endif /* CONFIG_TMPFS */
4979
shmem_put_super(struct super_block * sb)4980 static void shmem_put_super(struct super_block *sb)
4981 {
4982 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4983
4984 #if IS_ENABLED(CONFIG_UNICODE)
4985 if (sb->s_encoding)
4986 utf8_unload(sb->s_encoding);
4987 #endif
4988
4989 #ifdef CONFIG_TMPFS_QUOTA
4990 shmem_disable_quotas(sb);
4991 #endif
4992 free_percpu(sbinfo->ino_batch);
4993 percpu_counter_destroy(&sbinfo->used_blocks);
4994 mpol_put(sbinfo->mpol);
4995 kfree(sbinfo);
4996 sb->s_fs_info = NULL;
4997 }
4998
4999 #if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_TMPFS)
5000 static const struct dentry_operations shmem_ci_dentry_ops = {
5001 .d_hash = generic_ci_d_hash,
5002 .d_compare = generic_ci_d_compare,
5003 };
5004 #endif
5005
shmem_fill_super(struct super_block * sb,struct fs_context * fc)5006 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
5007 {
5008 struct shmem_options *ctx = fc->fs_private;
5009 struct inode *inode;
5010 struct shmem_sb_info *sbinfo;
5011 int error = -ENOMEM;
5012
5013 /* Round up to L1_CACHE_BYTES to resist false sharing */
5014 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
5015 L1_CACHE_BYTES), GFP_KERNEL);
5016 if (!sbinfo)
5017 return error;
5018
5019 sb->s_fs_info = sbinfo;
5020
5021 #ifdef CONFIG_TMPFS
5022 /*
5023 * Per default we only allow half of the physical ram per
5024 * tmpfs instance, limiting inodes to one per page of lowmem;
5025 * but the internal instance is left unlimited.
5026 */
5027 if (!(sb->s_flags & SB_KERNMOUNT)) {
5028 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
5029 ctx->blocks = shmem_default_max_blocks();
5030 if (!(ctx->seen & SHMEM_SEEN_INODES))
5031 ctx->inodes = shmem_default_max_inodes();
5032 if (!(ctx->seen & SHMEM_SEEN_INUMS))
5033 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
5034 sbinfo->noswap = ctx->noswap;
5035 } else {
5036 sb->s_flags |= SB_NOUSER;
5037 }
5038 sb->s_export_op = &shmem_export_ops;
5039 sb->s_flags |= SB_NOSEC;
5040
5041 #if IS_ENABLED(CONFIG_UNICODE)
5042 if (!ctx->encoding && ctx->strict_encoding) {
5043 pr_err("tmpfs: strict_encoding option without encoding is forbidden\n");
5044 error = -EINVAL;
5045 goto failed;
5046 }
5047
5048 if (ctx->encoding) {
5049 sb->s_encoding = ctx->encoding;
5050 set_default_d_op(sb, &shmem_ci_dentry_ops);
5051 if (ctx->strict_encoding)
5052 sb->s_encoding_flags = SB_ENC_STRICT_MODE_FL;
5053 }
5054 #endif
5055
5056 #else
5057 sb->s_flags |= SB_NOUSER;
5058 #endif /* CONFIG_TMPFS */
5059 sb->s_d_flags |= DCACHE_DONTCACHE;
5060 sbinfo->max_blocks = ctx->blocks;
5061 sbinfo->max_inodes = ctx->inodes;
5062 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
5063 if (sb->s_flags & SB_KERNMOUNT) {
5064 sbinfo->ino_batch = alloc_percpu(ino_t);
5065 if (!sbinfo->ino_batch)
5066 goto failed;
5067 }
5068 sbinfo->uid = ctx->uid;
5069 sbinfo->gid = ctx->gid;
5070 sbinfo->full_inums = ctx->full_inums;
5071 sbinfo->mode = ctx->mode;
5072 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5073 if (ctx->seen & SHMEM_SEEN_HUGE)
5074 sbinfo->huge = ctx->huge;
5075 else
5076 sbinfo->huge = tmpfs_huge;
5077 #endif
5078 sbinfo->mpol = ctx->mpol;
5079 ctx->mpol = NULL;
5080
5081 raw_spin_lock_init(&sbinfo->stat_lock);
5082 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
5083 goto failed;
5084 spin_lock_init(&sbinfo->shrinklist_lock);
5085 INIT_LIST_HEAD(&sbinfo->shrinklist);
5086
5087 sb->s_maxbytes = MAX_LFS_FILESIZE;
5088 sb->s_blocksize = PAGE_SIZE;
5089 sb->s_blocksize_bits = PAGE_SHIFT;
5090 sb->s_magic = TMPFS_MAGIC;
5091 sb->s_op = &shmem_ops;
5092 sb->s_time_gran = 1;
5093 #ifdef CONFIG_TMPFS_XATTR
5094 sb->s_xattr = shmem_xattr_handlers;
5095 #endif
5096 #ifdef CONFIG_TMPFS_POSIX_ACL
5097 sb->s_flags |= SB_POSIXACL;
5098 #endif
5099 uuid_t uuid;
5100 uuid_gen(&uuid);
5101 super_set_uuid(sb, uuid.b, sizeof(uuid));
5102
5103 #ifdef CONFIG_TMPFS_QUOTA
5104 if (ctx->seen & SHMEM_SEEN_QUOTA) {
5105 sb->dq_op = &shmem_quota_operations;
5106 sb->s_qcop = &dquot_quotactl_sysfile_ops;
5107 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
5108
5109 /* Copy the default limits from ctx into sbinfo */
5110 memcpy(&sbinfo->qlimits, &ctx->qlimits,
5111 sizeof(struct shmem_quota_limits));
5112
5113 if (shmem_enable_quotas(sb, ctx->quota_types))
5114 goto failed;
5115 }
5116 #endif /* CONFIG_TMPFS_QUOTA */
5117
5118 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
5119 S_IFDIR | sbinfo->mode, 0,
5120 mk_vma_flags(VMA_NORESERVE_BIT));
5121 if (IS_ERR(inode)) {
5122 error = PTR_ERR(inode);
5123 goto failed;
5124 }
5125 inode->i_uid = sbinfo->uid;
5126 inode->i_gid = sbinfo->gid;
5127 sb->s_root = d_make_root(inode);
5128 if (!sb->s_root)
5129 goto failed;
5130 return 0;
5131
5132 failed:
5133 shmem_put_super(sb);
5134 return error;
5135 }
5136
shmem_get_tree(struct fs_context * fc)5137 static int shmem_get_tree(struct fs_context *fc)
5138 {
5139 return get_tree_nodev(fc, shmem_fill_super);
5140 }
5141
shmem_free_fc(struct fs_context * fc)5142 static void shmem_free_fc(struct fs_context *fc)
5143 {
5144 struct shmem_options *ctx = fc->fs_private;
5145
5146 if (ctx) {
5147 mpol_put(ctx->mpol);
5148 kfree(ctx);
5149 }
5150 }
5151
5152 static const struct fs_context_operations shmem_fs_context_ops = {
5153 .free = shmem_free_fc,
5154 .get_tree = shmem_get_tree,
5155 #ifdef CONFIG_TMPFS
5156 .parse_monolithic = shmem_parse_monolithic,
5157 .parse_param = shmem_parse_one,
5158 .reconfigure = shmem_reconfigure,
5159 #endif
5160 };
5161
5162 static struct kmem_cache *shmem_inode_cachep __ro_after_init;
5163
shmem_alloc_inode(struct super_block * sb)5164 static struct inode *shmem_alloc_inode(struct super_block *sb)
5165 {
5166 struct shmem_inode_info *info;
5167 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
5168 if (!info)
5169 return NULL;
5170 return &info->vfs_inode;
5171 }
5172
shmem_free_in_core_inode(struct inode * inode)5173 static void shmem_free_in_core_inode(struct inode *inode)
5174 {
5175 if (S_ISLNK(inode->i_mode))
5176 kfree(inode->i_link);
5177 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
5178 }
5179
shmem_destroy_inode(struct inode * inode)5180 static void shmem_destroy_inode(struct inode *inode)
5181 {
5182 if (S_ISREG(inode->i_mode))
5183 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
5184 if (S_ISDIR(inode->i_mode))
5185 simple_offset_destroy(shmem_get_offset_ctx(inode));
5186 }
5187
shmem_init_inode(void * foo)5188 static void shmem_init_inode(void *foo)
5189 {
5190 struct shmem_inode_info *info = foo;
5191 inode_init_once(&info->vfs_inode);
5192 }
5193
shmem_init_inodecache(void)5194 static void __init shmem_init_inodecache(void)
5195 {
5196 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
5197 sizeof(struct shmem_inode_info),
5198 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
5199 }
5200
shmem_destroy_inodecache(void)5201 static void __init shmem_destroy_inodecache(void)
5202 {
5203 kmem_cache_destroy(shmem_inode_cachep);
5204 }
5205
5206 /* Keep the page in page cache instead of truncating it */
shmem_error_remove_folio(struct address_space * mapping,struct folio * folio)5207 static int shmem_error_remove_folio(struct address_space *mapping,
5208 struct folio *folio)
5209 {
5210 return 0;
5211 }
5212
5213 static const struct address_space_operations shmem_aops = {
5214 .dirty_folio = noop_dirty_folio,
5215 #ifdef CONFIG_TMPFS
5216 .write_begin = shmem_write_begin,
5217 .write_end = shmem_write_end,
5218 #endif
5219 #ifdef CONFIG_MIGRATION
5220 .migrate_folio = migrate_folio,
5221 #endif
5222 .error_remove_folio = shmem_error_remove_folio,
5223 };
5224
5225 static const struct file_operations shmem_file_operations = {
5226 .mmap_prepare = shmem_mmap_prepare,
5227 .open = shmem_file_open,
5228 .get_unmapped_area = shmem_get_unmapped_area,
5229 #ifdef CONFIG_TMPFS
5230 .llseek = shmem_file_llseek,
5231 .read_iter = shmem_file_read_iter,
5232 .write_iter = shmem_file_write_iter,
5233 .fsync = noop_fsync,
5234 .splice_read = shmem_file_splice_read,
5235 .splice_write = iter_file_splice_write,
5236 .fallocate = shmem_fallocate,
5237 .setlease = generic_setlease,
5238 #endif
5239 };
5240
5241 static const struct inode_operations shmem_inode_operations = {
5242 .getattr = shmem_getattr,
5243 .setattr = shmem_setattr,
5244 #ifdef CONFIG_TMPFS_XATTR
5245 .listxattr = shmem_listxattr,
5246 .set_acl = simple_set_acl,
5247 .fileattr_get = shmem_fileattr_get,
5248 .fileattr_set = shmem_fileattr_set,
5249 #endif
5250 };
5251
5252 static const struct inode_operations shmem_dir_inode_operations = {
5253 #ifdef CONFIG_TMPFS
5254 .getattr = shmem_getattr,
5255 .create = shmem_create,
5256 .lookup = simple_lookup,
5257 .link = shmem_link,
5258 .unlink = shmem_unlink,
5259 .symlink = shmem_symlink,
5260 .mkdir = shmem_mkdir,
5261 .rmdir = shmem_rmdir,
5262 .mknod = shmem_mknod,
5263 .rename = shmem_rename2,
5264 .tmpfile = shmem_tmpfile,
5265 .get_offset_ctx = shmem_get_offset_ctx,
5266 #endif
5267 #ifdef CONFIG_TMPFS_XATTR
5268 .listxattr = shmem_listxattr,
5269 .fileattr_get = shmem_fileattr_get,
5270 .fileattr_set = shmem_fileattr_set,
5271 #endif
5272 #ifdef CONFIG_TMPFS_POSIX_ACL
5273 .setattr = shmem_setattr,
5274 .set_acl = simple_set_acl,
5275 #endif
5276 };
5277
5278 static const struct inode_operations shmem_special_inode_operations = {
5279 .getattr = shmem_getattr,
5280 #ifdef CONFIG_TMPFS_XATTR
5281 .listxattr = shmem_listxattr,
5282 #endif
5283 #ifdef CONFIG_TMPFS_POSIX_ACL
5284 .setattr = shmem_setattr,
5285 .set_acl = simple_set_acl,
5286 #endif
5287 };
5288
5289 static const struct super_operations shmem_ops = {
5290 .alloc_inode = shmem_alloc_inode,
5291 .free_inode = shmem_free_in_core_inode,
5292 .destroy_inode = shmem_destroy_inode,
5293 #ifdef CONFIG_TMPFS
5294 .statfs = shmem_statfs,
5295 .show_options = shmem_show_options,
5296 #endif
5297 #ifdef CONFIG_TMPFS_QUOTA
5298 .get_dquots = shmem_get_dquots,
5299 #endif
5300 .evict_inode = shmem_evict_inode,
5301 .drop_inode = inode_just_drop,
5302 .put_super = shmem_put_super,
5303 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5304 .nr_cached_objects = shmem_unused_huge_count,
5305 .free_cached_objects = shmem_unused_huge_scan,
5306 #endif
5307 };
5308
5309 static const struct vm_operations_struct shmem_vm_ops = {
5310 .fault = shmem_fault,
5311 .map_pages = filemap_map_pages,
5312 #ifdef CONFIG_NUMA
5313 .set_policy = shmem_set_policy,
5314 .get_policy = shmem_get_policy,
5315 #endif
5316 };
5317
5318 static const struct vm_operations_struct shmem_anon_vm_ops = {
5319 .fault = shmem_fault,
5320 .map_pages = filemap_map_pages,
5321 #ifdef CONFIG_NUMA
5322 .set_policy = shmem_set_policy,
5323 .get_policy = shmem_get_policy,
5324 #endif
5325 };
5326
shmem_init_fs_context(struct fs_context * fc)5327 int shmem_init_fs_context(struct fs_context *fc)
5328 {
5329 struct shmem_options *ctx;
5330
5331 ctx = kzalloc_obj(struct shmem_options);
5332 if (!ctx)
5333 return -ENOMEM;
5334
5335 ctx->mode = 0777 | S_ISVTX;
5336 ctx->uid = current_fsuid();
5337 ctx->gid = current_fsgid();
5338
5339 #if IS_ENABLED(CONFIG_UNICODE)
5340 ctx->encoding = NULL;
5341 #endif
5342
5343 fc->fs_private = ctx;
5344 fc->ops = &shmem_fs_context_ops;
5345 #ifdef CONFIG_TMPFS
5346 fc->sb_flags |= SB_I_VERSION;
5347 #endif
5348 return 0;
5349 }
5350
5351 static struct file_system_type shmem_fs_type = {
5352 .owner = THIS_MODULE,
5353 .name = "tmpfs",
5354 .init_fs_context = shmem_init_fs_context,
5355 #ifdef CONFIG_TMPFS
5356 .parameters = shmem_fs_parameters,
5357 #endif
5358 .kill_sb = kill_anon_super,
5359 .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
5360 };
5361
5362 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5363
5364 #define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \
5365 { \
5366 .attr = { .name = __stringify(_name), .mode = _mode }, \
5367 .show = _show, \
5368 .store = _store, \
5369 }
5370
5371 #define TMPFS_ATTR_W(_name, _store) \
5372 static struct kobj_attribute tmpfs_attr_##_name = \
5373 __INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
5374
5375 #define TMPFS_ATTR_RW(_name, _show, _store) \
5376 static struct kobj_attribute tmpfs_attr_##_name = \
5377 __INIT_KOBJ_ATTR(_name, 0644, _show, _store)
5378
5379 #define TMPFS_ATTR_RO(_name, _show) \
5380 static struct kobj_attribute tmpfs_attr_##_name = \
5381 __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
5382
5383 #if IS_ENABLED(CONFIG_UNICODE)
casefold_show(struct kobject * kobj,struct kobj_attribute * a,char * buf)5384 static ssize_t casefold_show(struct kobject *kobj, struct kobj_attribute *a,
5385 char *buf)
5386 {
5387 return sysfs_emit(buf, "supported\n");
5388 }
5389 TMPFS_ATTR_RO(casefold, casefold_show);
5390 #endif
5391
5392 static struct attribute *tmpfs_attributes[] = {
5393 #if IS_ENABLED(CONFIG_UNICODE)
5394 &tmpfs_attr_casefold.attr,
5395 #endif
5396 NULL
5397 };
5398
5399 static const struct attribute_group tmpfs_attribute_group = {
5400 .attrs = tmpfs_attributes,
5401 .name = "features"
5402 };
5403
5404 static struct kobject *tmpfs_kobj;
5405
tmpfs_sysfs_init(void)5406 static int __init tmpfs_sysfs_init(void)
5407 {
5408 int ret;
5409
5410 tmpfs_kobj = kobject_create_and_add("tmpfs", fs_kobj);
5411 if (!tmpfs_kobj)
5412 return -ENOMEM;
5413
5414 ret = sysfs_create_group(tmpfs_kobj, &tmpfs_attribute_group);
5415 if (ret)
5416 kobject_put(tmpfs_kobj);
5417
5418 return ret;
5419 }
5420 #endif /* CONFIG_SYSFS && CONFIG_TMPFS */
5421
shmem_init(void)5422 void __init shmem_init(void)
5423 {
5424 int error;
5425
5426 shmem_init_inodecache();
5427
5428 #ifdef CONFIG_TMPFS_QUOTA
5429 register_quota_format(&shmem_quota_format);
5430 #endif
5431
5432 error = register_filesystem(&shmem_fs_type);
5433 if (error) {
5434 pr_err("Could not register tmpfs\n");
5435 goto out2;
5436 }
5437
5438 shm_mnt = kern_mount(&shmem_fs_type);
5439 if (IS_ERR(shm_mnt)) {
5440 error = PTR_ERR(shm_mnt);
5441 pr_err("Could not kern_mount tmpfs\n");
5442 goto out1;
5443 }
5444
5445 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5446 error = tmpfs_sysfs_init();
5447 if (error) {
5448 pr_err("Could not init tmpfs sysfs\n");
5449 goto out1;
5450 }
5451 #endif
5452
5453 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5454 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5455 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5456 else
5457 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5458
5459 /*
5460 * Default to setting PMD-sized THP to inherit the global setting and
5461 * disable all other multi-size THPs.
5462 */
5463 if (!shmem_orders_configured)
5464 huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
5465 #endif
5466 return;
5467
5468 out1:
5469 unregister_filesystem(&shmem_fs_type);
5470 out2:
5471 #ifdef CONFIG_TMPFS_QUOTA
5472 unregister_quota_format(&shmem_quota_format);
5473 #endif
5474 shmem_destroy_inodecache();
5475 shm_mnt = ERR_PTR(error);
5476 }
5477
5478 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
shmem_enabled_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)5479 static ssize_t shmem_enabled_show(struct kobject *kobj,
5480 struct kobj_attribute *attr, char *buf)
5481 {
5482 static const int values[] = {
5483 SHMEM_HUGE_ALWAYS,
5484 SHMEM_HUGE_WITHIN_SIZE,
5485 SHMEM_HUGE_ADVISE,
5486 SHMEM_HUGE_NEVER,
5487 SHMEM_HUGE_DENY,
5488 SHMEM_HUGE_FORCE,
5489 };
5490 int len = 0;
5491 int i;
5492
5493 for (i = 0; i < ARRAY_SIZE(values); i++) {
5494 len += sysfs_emit_at(buf, len,
5495 shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5496 i ? " " : "", shmem_format_huge(values[i]));
5497 }
5498 len += sysfs_emit_at(buf, len, "\n");
5499
5500 return len;
5501 }
5502
shmem_enabled_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)5503 static ssize_t shmem_enabled_store(struct kobject *kobj,
5504 struct kobj_attribute *attr, const char *buf, size_t count)
5505 {
5506 char tmp[16];
5507 int huge, err;
5508
5509 if (count + 1 > sizeof(tmp))
5510 return -EINVAL;
5511 memcpy(tmp, buf, count);
5512 tmp[count] = '\0';
5513 if (count && tmp[count - 1] == '\n')
5514 tmp[count - 1] = '\0';
5515
5516 huge = shmem_parse_huge(tmp);
5517 if (huge == -EINVAL)
5518 return huge;
5519
5520 shmem_huge = huge;
5521 if (shmem_huge > SHMEM_HUGE_DENY)
5522 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5523
5524 err = start_stop_khugepaged();
5525 return err ? err : count;
5526 }
5527
5528 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
5529 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5530
thpsize_shmem_enabled_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)5531 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5532 struct kobj_attribute *attr, char *buf)
5533 {
5534 int order = to_thpsize(kobj)->order;
5535 const char *output;
5536
5537 if (test_bit(order, &huge_shmem_orders_always))
5538 output = "[always] inherit within_size advise never";
5539 else if (test_bit(order, &huge_shmem_orders_inherit))
5540 output = "always [inherit] within_size advise never";
5541 else if (test_bit(order, &huge_shmem_orders_within_size))
5542 output = "always inherit [within_size] advise never";
5543 else if (test_bit(order, &huge_shmem_orders_madvise))
5544 output = "always inherit within_size [advise] never";
5545 else
5546 output = "always inherit within_size advise [never]";
5547
5548 return sysfs_emit(buf, "%s\n", output);
5549 }
5550
thpsize_shmem_enabled_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)5551 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5552 struct kobj_attribute *attr,
5553 const char *buf, size_t count)
5554 {
5555 int order = to_thpsize(kobj)->order;
5556 ssize_t ret = count;
5557
5558 if (sysfs_streq(buf, "always")) {
5559 spin_lock(&huge_shmem_orders_lock);
5560 clear_bit(order, &huge_shmem_orders_inherit);
5561 clear_bit(order, &huge_shmem_orders_madvise);
5562 clear_bit(order, &huge_shmem_orders_within_size);
5563 set_bit(order, &huge_shmem_orders_always);
5564 spin_unlock(&huge_shmem_orders_lock);
5565 } else if (sysfs_streq(buf, "inherit")) {
5566 /* Do not override huge allocation policy with non-PMD sized mTHP */
5567 if (shmem_huge == SHMEM_HUGE_FORCE &&
5568 order != HPAGE_PMD_ORDER)
5569 return -EINVAL;
5570
5571 spin_lock(&huge_shmem_orders_lock);
5572 clear_bit(order, &huge_shmem_orders_always);
5573 clear_bit(order, &huge_shmem_orders_madvise);
5574 clear_bit(order, &huge_shmem_orders_within_size);
5575 set_bit(order, &huge_shmem_orders_inherit);
5576 spin_unlock(&huge_shmem_orders_lock);
5577 } else if (sysfs_streq(buf, "within_size")) {
5578 spin_lock(&huge_shmem_orders_lock);
5579 clear_bit(order, &huge_shmem_orders_always);
5580 clear_bit(order, &huge_shmem_orders_inherit);
5581 clear_bit(order, &huge_shmem_orders_madvise);
5582 set_bit(order, &huge_shmem_orders_within_size);
5583 spin_unlock(&huge_shmem_orders_lock);
5584 } else if (sysfs_streq(buf, "advise")) {
5585 spin_lock(&huge_shmem_orders_lock);
5586 clear_bit(order, &huge_shmem_orders_always);
5587 clear_bit(order, &huge_shmem_orders_inherit);
5588 clear_bit(order, &huge_shmem_orders_within_size);
5589 set_bit(order, &huge_shmem_orders_madvise);
5590 spin_unlock(&huge_shmem_orders_lock);
5591 } else if (sysfs_streq(buf, "never")) {
5592 spin_lock(&huge_shmem_orders_lock);
5593 clear_bit(order, &huge_shmem_orders_always);
5594 clear_bit(order, &huge_shmem_orders_inherit);
5595 clear_bit(order, &huge_shmem_orders_within_size);
5596 clear_bit(order, &huge_shmem_orders_madvise);
5597 spin_unlock(&huge_shmem_orders_lock);
5598 } else {
5599 ret = -EINVAL;
5600 }
5601
5602 if (ret > 0) {
5603 int err = start_stop_khugepaged();
5604
5605 if (err)
5606 ret = err;
5607 }
5608 return ret;
5609 }
5610
5611 struct kobj_attribute thpsize_shmem_enabled_attr =
5612 __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
5613 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
5614
5615 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
5616
setup_transparent_hugepage_shmem(char * str)5617 static int __init setup_transparent_hugepage_shmem(char *str)
5618 {
5619 int huge;
5620
5621 huge = shmem_parse_huge(str);
5622 if (huge == -EINVAL) {
5623 pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n");
5624 return huge;
5625 }
5626
5627 shmem_huge = huge;
5628 return 1;
5629 }
5630 __setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem);
5631
setup_transparent_hugepage_tmpfs(char * str)5632 static int __init setup_transparent_hugepage_tmpfs(char *str)
5633 {
5634 int huge;
5635
5636 huge = shmem_parse_huge(str);
5637 if (huge < 0) {
5638 pr_warn("transparent_hugepage_tmpfs= cannot parse, ignored\n");
5639 return huge;
5640 }
5641
5642 tmpfs_huge = huge;
5643 return 1;
5644 }
5645 __setup("transparent_hugepage_tmpfs=", setup_transparent_hugepage_tmpfs);
5646
5647 static char str_dup[PAGE_SIZE] __initdata;
setup_thp_shmem(char * str)5648 static int __init setup_thp_shmem(char *str)
5649 {
5650 char *token, *range, *policy, *subtoken;
5651 unsigned long always, inherit, madvise, within_size;
5652 char *start_size, *end_size;
5653 int start, end, nr;
5654 char *p;
5655
5656 if (!str || strlen(str) + 1 > PAGE_SIZE)
5657 goto err;
5658 strscpy(str_dup, str);
5659
5660 always = huge_shmem_orders_always;
5661 inherit = huge_shmem_orders_inherit;
5662 madvise = huge_shmem_orders_madvise;
5663 within_size = huge_shmem_orders_within_size;
5664 p = str_dup;
5665 while ((token = strsep(&p, ";")) != NULL) {
5666 range = strsep(&token, ":");
5667 policy = token;
5668
5669 if (!policy)
5670 goto err;
5671
5672 while ((subtoken = strsep(&range, ",")) != NULL) {
5673 if (strchr(subtoken, '-')) {
5674 start_size = strsep(&subtoken, "-");
5675 end_size = subtoken;
5676
5677 start = get_order_from_str(start_size,
5678 THP_ORDERS_ALL_FILE_DEFAULT);
5679 end = get_order_from_str(end_size,
5680 THP_ORDERS_ALL_FILE_DEFAULT);
5681 } else {
5682 start_size = end_size = subtoken;
5683 start = end = get_order_from_str(subtoken,
5684 THP_ORDERS_ALL_FILE_DEFAULT);
5685 }
5686
5687 if (start < 0) {
5688 pr_err("invalid size %s in thp_shmem boot parameter\n",
5689 start_size);
5690 goto err;
5691 }
5692
5693 if (end < 0) {
5694 pr_err("invalid size %s in thp_shmem boot parameter\n",
5695 end_size);
5696 goto err;
5697 }
5698
5699 if (start > end)
5700 goto err;
5701
5702 nr = end - start + 1;
5703 if (!strcmp(policy, "always")) {
5704 bitmap_set(&always, start, nr);
5705 bitmap_clear(&inherit, start, nr);
5706 bitmap_clear(&madvise, start, nr);
5707 bitmap_clear(&within_size, start, nr);
5708 } else if (!strcmp(policy, "advise")) {
5709 bitmap_set(&madvise, start, nr);
5710 bitmap_clear(&inherit, start, nr);
5711 bitmap_clear(&always, start, nr);
5712 bitmap_clear(&within_size, start, nr);
5713 } else if (!strcmp(policy, "inherit")) {
5714 bitmap_set(&inherit, start, nr);
5715 bitmap_clear(&madvise, start, nr);
5716 bitmap_clear(&always, start, nr);
5717 bitmap_clear(&within_size, start, nr);
5718 } else if (!strcmp(policy, "within_size")) {
5719 bitmap_set(&within_size, start, nr);
5720 bitmap_clear(&inherit, start, nr);
5721 bitmap_clear(&madvise, start, nr);
5722 bitmap_clear(&always, start, nr);
5723 } else if (!strcmp(policy, "never")) {
5724 bitmap_clear(&inherit, start, nr);
5725 bitmap_clear(&madvise, start, nr);
5726 bitmap_clear(&always, start, nr);
5727 bitmap_clear(&within_size, start, nr);
5728 } else {
5729 pr_err("invalid policy %s in thp_shmem boot parameter\n", policy);
5730 goto err;
5731 }
5732 }
5733 }
5734
5735 huge_shmem_orders_always = always;
5736 huge_shmem_orders_madvise = madvise;
5737 huge_shmem_orders_inherit = inherit;
5738 huge_shmem_orders_within_size = within_size;
5739 shmem_orders_configured = true;
5740 return 1;
5741
5742 err:
5743 pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str);
5744 return 0;
5745 }
5746 __setup("thp_shmem=", setup_thp_shmem);
5747
5748 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5749
5750 #else /* !CONFIG_SHMEM */
5751
5752 /*
5753 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5754 *
5755 * This is intended for small system where the benefits of the full
5756 * shmem code (swap-backed and resource-limited) are outweighed by
5757 * their complexity. On systems without swap this code should be
5758 * effectively equivalent, but much lighter weight.
5759 */
5760
5761 static struct file_system_type shmem_fs_type = {
5762 .name = "tmpfs",
5763 .init_fs_context = ramfs_init_fs_context,
5764 .parameters = ramfs_fs_parameters,
5765 .kill_sb = ramfs_kill_sb,
5766 .fs_flags = FS_USERNS_MOUNT,
5767 };
5768
shmem_init(void)5769 void __init shmem_init(void)
5770 {
5771 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
5772
5773 shm_mnt = kern_mount(&shmem_fs_type);
5774 BUG_ON(IS_ERR(shm_mnt));
5775 }
5776
shmem_unuse(unsigned int type)5777 int shmem_unuse(unsigned int type)
5778 {
5779 return 0;
5780 }
5781
shmem_lock(struct file * file,int lock,struct ucounts * ucounts)5782 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
5783 {
5784 return 0;
5785 }
5786
shmem_unlock_mapping(struct address_space * mapping)5787 void shmem_unlock_mapping(struct address_space *mapping)
5788 {
5789 }
5790
5791 #ifdef CONFIG_MMU
shmem_get_unmapped_area(struct file * file,unsigned long addr,unsigned long len,unsigned long pgoff,unsigned long flags)5792 unsigned long shmem_get_unmapped_area(struct file *file,
5793 unsigned long addr, unsigned long len,
5794 unsigned long pgoff, unsigned long flags)
5795 {
5796 return mm_get_unmapped_area(file, addr, len, pgoff, flags);
5797 }
5798 #endif
5799
shmem_truncate_range(struct inode * inode,loff_t lstart,uoff_t lend)5800 void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
5801 {
5802 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5803 }
5804 EXPORT_SYMBOL_GPL(shmem_truncate_range);
5805
5806 #define shmem_vm_ops generic_file_vm_ops
5807 #define shmem_anon_vm_ops generic_file_vm_ops
5808 #define shmem_file_operations ramfs_file_operations
5809
shmem_acct_size(unsigned long flags,loff_t size)5810 static inline int shmem_acct_size(unsigned long flags, loff_t size)
5811 {
5812 return 0;
5813 }
5814
shmem_unacct_size(unsigned long flags,loff_t size)5815 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
5816 {
5817 }
5818
shmem_get_inode(struct mnt_idmap * idmap,struct super_block * sb,struct inode * dir,umode_t mode,dev_t dev,vma_flags_t flags)5819 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
5820 struct super_block *sb, struct inode *dir,
5821 umode_t mode, dev_t dev, vma_flags_t flags)
5822 {
5823 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
5824 return inode ? inode : ERR_PTR(-ENOSPC);
5825 }
5826
5827 #endif /* CONFIG_SHMEM */
5828
5829 /* common code */
5830
__shmem_file_setup(struct vfsmount * mnt,const char * name,loff_t size,vma_flags_t flags,unsigned int i_flags)5831 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
5832 loff_t size, vma_flags_t flags,
5833 unsigned int i_flags)
5834 {
5835 const unsigned long shmem_flags =
5836 vma_flags_test(&flags, VMA_NORESERVE_BIT) ? SHMEM_F_NORESERVE : 0;
5837 struct inode *inode;
5838 struct file *res;
5839
5840 if (IS_ERR(mnt))
5841 return ERR_CAST(mnt);
5842
5843 if (size < 0 || size > MAX_LFS_FILESIZE)
5844 return ERR_PTR(-EINVAL);
5845
5846 if (is_idmapped_mnt(mnt))
5847 return ERR_PTR(-EINVAL);
5848
5849 if (shmem_acct_size(shmem_flags, size))
5850 return ERR_PTR(-ENOMEM);
5851
5852 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
5853 S_IFREG | S_IRWXUGO, 0, flags);
5854 if (IS_ERR(inode)) {
5855 shmem_unacct_size(shmem_flags, size);
5856 return ERR_CAST(inode);
5857 }
5858 inode->i_flags |= i_flags;
5859 inode->i_size = size;
5860 clear_nlink(inode); /* It is unlinked */
5861 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
5862 if (!IS_ERR(res))
5863 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
5864 &shmem_file_operations);
5865 if (IS_ERR(res))
5866 iput(inode);
5867 return res;
5868 }
5869
5870 /**
5871 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5872 * kernel internal. There will be NO LSM permission checks against the
5873 * underlying inode. So users of this interface must do LSM checks at a
5874 * higher layer. The users are the big_key and shm implementations. LSM
5875 * checks are provided at the key or shm level rather than the inode.
5876 * @name: name for dentry (to be seen in /proc/<pid>/maps)
5877 * @size: size to be set for the file
5878 * @flags: VMA_NORESERVE_BIT suppresses pre-accounting of the entire object size
5879 */
shmem_kernel_file_setup(const char * name,loff_t size,vma_flags_t flags)5880 struct file *shmem_kernel_file_setup(const char *name, loff_t size,
5881 vma_flags_t flags)
5882 {
5883 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
5884 }
5885 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
5886
5887 /**
5888 * shmem_file_setup - get an unlinked file living in tmpfs
5889 * @name: name for dentry (to be seen in /proc/<pid>/maps)
5890 * @size: size to be set for the file
5891 * @flags: VMA_NORESERVE_BIT suppresses pre-accounting of the entire object size
5892 */
shmem_file_setup(const char * name,loff_t size,vma_flags_t flags)5893 struct file *shmem_file_setup(const char *name, loff_t size, vma_flags_t flags)
5894 {
5895 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
5896 }
5897 EXPORT_SYMBOL_GPL(shmem_file_setup);
5898
5899 /**
5900 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5901 * @mnt: the tmpfs mount where the file will be created
5902 * @name: name for dentry (to be seen in /proc/<pid>/maps)
5903 * @size: size to be set for the file
5904 * @flags: VMA_NORESERVE_BIT suppresses pre-accounting of the entire object size
5905 */
shmem_file_setup_with_mnt(struct vfsmount * mnt,const char * name,loff_t size,vma_flags_t flags)5906 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
5907 loff_t size, vma_flags_t flags)
5908 {
5909 return __shmem_file_setup(mnt, name, size, flags, 0);
5910 }
5911 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
5912
__shmem_zero_setup(unsigned long start,unsigned long end,vma_flags_t flags)5913 static struct file *__shmem_zero_setup(unsigned long start, unsigned long end,
5914 vma_flags_t flags)
5915 {
5916 loff_t size = end - start;
5917
5918 /*
5919 * Cloning a new file under mmap_lock leads to a lock ordering conflict
5920 * between XFS directory reading and selinux: since this file is only
5921 * accessible to the user through its mapping, use S_PRIVATE flag to
5922 * bypass file security, in the same way as shmem_kernel_file_setup().
5923 */
5924 return shmem_kernel_file_setup("dev/zero", size, flags);
5925 }
5926
5927 /**
5928 * shmem_zero_setup - setup a shared anonymous mapping
5929 * @vma: the vma to be mmapped is prepared by do_mmap
5930 * Returns: 0 on success, or error
5931 */
shmem_zero_setup(struct vm_area_struct * vma)5932 int shmem_zero_setup(struct vm_area_struct *vma)
5933 {
5934 struct file *file = __shmem_zero_setup(vma->vm_start, vma->vm_end, vma->flags);
5935
5936 if (IS_ERR(file))
5937 return PTR_ERR(file);
5938
5939 if (vma->vm_file)
5940 fput(vma->vm_file);
5941 vma->vm_file = file;
5942 vma->vm_ops = &shmem_anon_vm_ops;
5943
5944 return 0;
5945 }
5946
5947 /**
5948 * shmem_zero_setup_desc - same as shmem_zero_setup, but determined by VMA
5949 * descriptor for convenience.
5950 * @desc: Describes VMA
5951 * Returns: 0 on success, or error
5952 */
shmem_zero_setup_desc(struct vm_area_desc * desc)5953 int shmem_zero_setup_desc(struct vm_area_desc *desc)
5954 {
5955 struct file *file = __shmem_zero_setup(desc->start, desc->end, desc->vma_flags);
5956
5957 if (IS_ERR(file))
5958 return PTR_ERR(file);
5959
5960 desc->vm_file = file;
5961 desc->vm_ops = &shmem_anon_vm_ops;
5962
5963 return 0;
5964 }
5965
5966 /**
5967 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5968 * @mapping: the folio's address_space
5969 * @index: the folio index
5970 * @gfp: the page allocator flags to use if allocating
5971 *
5972 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
5973 * with any new page allocations done using the specified allocation flags.
5974 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
5975 * suit tmpfs, since it may have pages in swapcache, and needs to find those
5976 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
5977 *
5978 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
5979 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
5980 */
shmem_read_folio_gfp(struct address_space * mapping,pgoff_t index,gfp_t gfp)5981 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
5982 pgoff_t index, gfp_t gfp)
5983 {
5984 #ifdef CONFIG_SHMEM
5985 struct inode *inode = mapping->host;
5986 struct folio *folio;
5987 int error;
5988
5989 error = shmem_get_folio_gfp(inode, index, i_size_read(inode),
5990 &folio, SGP_CACHE, gfp, NULL, NULL);
5991 if (error)
5992 return ERR_PTR(error);
5993
5994 folio_unlock(folio);
5995 return folio;
5996 #else
5997 /*
5998 * The tiny !SHMEM case uses ramfs without swap
5999 */
6000 return mapping_read_folio_gfp(mapping, index, gfp);
6001 #endif
6002 }
6003 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
6004
shmem_read_mapping_page_gfp(struct address_space * mapping,pgoff_t index,gfp_t gfp)6005 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
6006 pgoff_t index, gfp_t gfp)
6007 {
6008 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
6009 struct page *page;
6010
6011 if (IS_ERR(folio))
6012 return &folio->page;
6013
6014 page = folio_file_page(folio, index);
6015 if (PageHWPoison(page)) {
6016 folio_put(folio);
6017 return ERR_PTR(-EIO);
6018 }
6019
6020 return page;
6021 }
6022 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
6023