xref: /linux/mm/shmem.c (revision a3f143c461444c0b56360bbf468615fa814a8372)
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *		 2000 Transmeta Corp.
6  *		 2000-2001 Christoph Rohland
7  *		 2000-2001 SAP AG
8  *		 2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23 
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/fileattr.h>
32 #include <linux/mm.h>
33 #include <linux/random.h>
34 #include <linux/sched/signal.h>
35 #include <linux/export.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/swap.h>
38 #include <linux/uio.h>
39 #include <linux/hugetlb.h>
40 #include <linux/fs_parser.h>
41 #include <linux/swapfile.h>
42 #include <linux/iversion.h>
43 #include "swap.h"
44 
45 static struct vfsmount *shm_mnt __ro_after_init;
46 
47 #ifdef CONFIG_SHMEM
48 /*
49  * This virtual memory filesystem is heavily based on the ramfs. It
50  * extends ramfs with the ability to use swap and to honor resource limits,
51  * which makes it a completely usable filesystem.
52  */
53 
54 #include <linux/xattr.h>
55 #include <linux/exportfs.h>
56 #include <linux/posix_acl.h>
57 #include <linux/posix_acl_xattr.h>
58 #include <linux/mman.h>
59 #include <linux/string.h>
60 #include <linux/slab.h>
61 #include <linux/backing-dev.h>
62 #include <linux/writeback.h>
63 #include <linux/pagevec.h>
64 #include <linux/percpu_counter.h>
65 #include <linux/falloc.h>
66 #include <linux/splice.h>
67 #include <linux/security.h>
68 #include <linux/swapops.h>
69 #include <linux/mempolicy.h>
70 #include <linux/namei.h>
71 #include <linux/ctype.h>
72 #include <linux/migrate.h>
73 #include <linux/highmem.h>
74 #include <linux/seq_file.h>
75 #include <linux/magic.h>
76 #include <linux/syscalls.h>
77 #include <linux/fcntl.h>
78 #include <uapi/linux/memfd.h>
79 #include <linux/rmap.h>
80 #include <linux/uuid.h>
81 #include <linux/quotaops.h>
82 #include <linux/rcupdate_wait.h>
83 
84 #include <linux/uaccess.h>
85 
86 #include "internal.h"
87 
88 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
89 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
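/*
 * Illustrative note (editor's addition, not in the original source): VM_ACCT
 * converts a byte count into the number of whole pages charged against
 * overcommit accounting.  Assuming PAGE_SIZE is 4096, VM_ACCT(5000) rounds
 * 5000 up to 8192 with PAGE_ALIGN() and shifts right by PAGE_SHIFT (12),
 * so 2 pages are charged.
 */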
90 
91 /* Pretend that each entry is of this size in directory's i_size */
92 #define BOGO_DIRENT_SIZE 20
93 
94 /* Pretend that one inode + its dentry occupy this much memory */
95 #define BOGO_INODE_SIZE 1024
96 
97 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98 #define SHORT_SYMLINK_LEN 128
99 
100 /*
101  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
102  * inode->i_private (with i_rwsem making sure that it has only one user at
103  * a time): we would prefer not to enlarge the shmem inode just for that.
104  */
105 struct shmem_falloc {
106 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
107 	pgoff_t start;		/* start of range currently being fallocated */
108 	pgoff_t next;		/* the next page offset to be fallocated */
109 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
110 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
111 };
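/*
 * Illustrative lifecycle (editor's note): shmem_fallocate() parks one of
 * these structs on inode->i_private for the duration of the call.  A
 * concurrent shmem_fault() that hits a hole being punched waits on *waitq
 * until the punch completes, while shmem_writepage() bumps nr_unswapped when
 * it refuses to swap out a page belonging to a still-in-progress fallocate,
 * letting shmem_fallocate() give up if that happens too often.
 */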
112 
113 struct shmem_options {
114 	unsigned long long blocks;
115 	unsigned long long inodes;
116 	struct mempolicy *mpol;
117 	kuid_t uid;
118 	kgid_t gid;
119 	umode_t mode;
120 	bool full_inums;
121 	int huge;
122 	int seen;
123 	bool noswap;
124 	unsigned short quota_types;
125 	struct shmem_quota_limits qlimits;
126 #define SHMEM_SEEN_BLOCKS 1
127 #define SHMEM_SEEN_INODES 2
128 #define SHMEM_SEEN_HUGE 4
129 #define SHMEM_SEEN_INUMS 8
130 #define SHMEM_SEEN_NOSWAP 16
131 #define SHMEM_SEEN_QUOTA 32
132 };
133 
134 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
135 static unsigned long huge_shmem_orders_always __read_mostly;
136 static unsigned long huge_shmem_orders_madvise __read_mostly;
137 static unsigned long huge_shmem_orders_inherit __read_mostly;
138 static unsigned long huge_shmem_orders_within_size __read_mostly;
139 #endif
140 
141 #ifdef CONFIG_TMPFS
142 static unsigned long shmem_default_max_blocks(void)
143 {
144 	return totalram_pages() / 2;
145 }
146 
147 static unsigned long shmem_default_max_inodes(void)
148 {
149 	unsigned long nr_pages = totalram_pages();
150 
151 	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
152 			ULONG_MAX / BOGO_INODE_SIZE);
153 }
154 #endif
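/*
 * Worked example (editor's note, figures assume 4 KiB pages): on a machine
 * with 4 GiB of RAM, totalram_pages() is about 1048576, so an unconfigured
 * tmpfs mount defaults to roughly 524288 blocks (2 GiB, the familiar "half
 * of RAM" limit), and the default inode limit follows the same heuristic,
 * bounded above by ULONG_MAX / BOGO_INODE_SIZE.
 */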
155 
156 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
157 			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
158 			struct vm_area_struct *vma, vm_fault_t *fault_type);
159 
160 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
161 {
162 	return sb->s_fs_info;
163 }
164 
165 /*
166  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
167  * for shared memory and for shared anonymous (/dev/zero) mappings
168  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
169  * consistent with the pre-accounting of private mappings ...
170  */
171 static inline int shmem_acct_size(unsigned long flags, loff_t size)
172 {
173 	return (flags & VM_NORESERVE) ?
174 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
175 }
176 
177 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
178 {
179 	if (!(flags & VM_NORESERVE))
180 		vm_unacct_memory(VM_ACCT(size));
181 }
182 
183 static inline int shmem_reacct_size(unsigned long flags,
184 		loff_t oldsize, loff_t newsize)
185 {
186 	if (!(flags & VM_NORESERVE)) {
187 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
188 			return security_vm_enough_memory_mm(current->mm,
189 					VM_ACCT(newsize) - VM_ACCT(oldsize));
190 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
191 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
192 	}
193 	return 0;
194 }
195 
196 /*
197  * ... whereas tmpfs objects are accounted incrementally as
198  * pages are allocated, in order to allow large sparse files.
199  * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
200  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
201  */
202 static inline int shmem_acct_blocks(unsigned long flags, long pages)
203 {
204 	if (!(flags & VM_NORESERVE))
205 		return 0;
206 
207 	return security_vm_enough_memory_mm(current->mm,
208 			pages * VM_ACCT(PAGE_SIZE));
209 }
210 
211 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
212 {
213 	if (flags & VM_NORESERVE)
214 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
215 }
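/*
 * Editor's note (illustrative summary): the two accounting schemes are
 * complementary.  An object without VM_NORESERVE (e.g. a shared anonymous
 * mapping) is charged its whole size up front by shmem_acct_size(), and
 * shmem_acct_blocks() then charges nothing per page.  A VM_NORESERVE object,
 * the normal tmpfs case, skips the up-front charge and instead pays per page
 * through shmem_acct_blocks()/shmem_unacct_blocks() as pages come and go.
 */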
216 
217 static int shmem_inode_acct_blocks(struct inode *inode, long pages)
218 {
219 	struct shmem_inode_info *info = SHMEM_I(inode);
220 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
221 	int err = -ENOSPC;
222 
223 	if (shmem_acct_blocks(info->flags, pages))
224 		return err;
225 
226 	might_sleep();	/* when quotas */
227 	if (sbinfo->max_blocks) {
228 		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
229 						sbinfo->max_blocks, pages))
230 			goto unacct;
231 
232 		err = dquot_alloc_block_nodirty(inode, pages);
233 		if (err) {
234 			percpu_counter_sub(&sbinfo->used_blocks, pages);
235 			goto unacct;
236 		}
237 	} else {
238 		err = dquot_alloc_block_nodirty(inode, pages);
239 		if (err)
240 			goto unacct;
241 	}
242 
243 	return 0;
244 
245 unacct:
246 	shmem_unacct_blocks(info->flags, pages);
247 	return err;
248 }
249 
250 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
251 {
252 	struct shmem_inode_info *info = SHMEM_I(inode);
253 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
254 
255 	might_sleep();	/* when quotas */
256 	dquot_free_block_nodirty(inode, pages);
257 
258 	if (sbinfo->max_blocks)
259 		percpu_counter_sub(&sbinfo->used_blocks, pages);
260 	shmem_unacct_blocks(info->flags, pages);
261 }
262 
263 static const struct super_operations shmem_ops;
264 static const struct address_space_operations shmem_aops;
265 static const struct file_operations shmem_file_operations;
266 static const struct inode_operations shmem_inode_operations;
267 static const struct inode_operations shmem_dir_inode_operations;
268 static const struct inode_operations shmem_special_inode_operations;
269 static const struct vm_operations_struct shmem_vm_ops;
270 static const struct vm_operations_struct shmem_anon_vm_ops;
271 static struct file_system_type shmem_fs_type;
272 
273 bool shmem_mapping(struct address_space *mapping)
274 {
275 	return mapping->a_ops == &shmem_aops;
276 }
277 EXPORT_SYMBOL_GPL(shmem_mapping);
278 
279 bool vma_is_anon_shmem(struct vm_area_struct *vma)
280 {
281 	return vma->vm_ops == &shmem_anon_vm_ops;
282 }
283 
284 bool vma_is_shmem(struct vm_area_struct *vma)
285 {
286 	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
287 }
288 
289 static LIST_HEAD(shmem_swaplist);
290 static DEFINE_MUTEX(shmem_swaplist_mutex);
291 
292 #ifdef CONFIG_TMPFS_QUOTA
293 
294 static int shmem_enable_quotas(struct super_block *sb,
295 			       unsigned short quota_types)
296 {
297 	int type, err = 0;
298 
299 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
300 	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
301 		if (!(quota_types & (1 << type)))
302 			continue;
303 		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
304 					  DQUOT_USAGE_ENABLED |
305 					  DQUOT_LIMITS_ENABLED);
306 		if (err)
307 			goto out_err;
308 	}
309 	return 0;
310 
311 out_err:
312 	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
313 		type, err);
314 	for (type--; type >= 0; type--)
315 		dquot_quota_off(sb, type);
316 	return err;
317 }
318 
319 static void shmem_disable_quotas(struct super_block *sb)
320 {
321 	int type;
322 
323 	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
324 		dquot_quota_off(sb, type);
325 }
326 
327 static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
328 {
329 	return SHMEM_I(inode)->i_dquot;
330 }
331 #endif /* CONFIG_TMPFS_QUOTA */
332 
333 /*
334  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
335  * produces a novel ino for the newly allocated inode.
336  *
337  * It may also be called when making a hard link to permit the space needed by
338  * each dentry. However, in that case, no new inode number is needed since that
339  * internally draws from another pool of inode numbers (currently global
340  * get_next_ino()). This case is indicated by passing NULL as inop.
341  */
342 #define SHMEM_INO_BATCH 1024
343 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
344 {
345 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
346 	ino_t ino;
347 
348 	if (!(sb->s_flags & SB_KERNMOUNT)) {
349 		raw_spin_lock(&sbinfo->stat_lock);
350 		if (sbinfo->max_inodes) {
351 			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
352 				raw_spin_unlock(&sbinfo->stat_lock);
353 				return -ENOSPC;
354 			}
355 			sbinfo->free_ispace -= BOGO_INODE_SIZE;
356 		}
357 		if (inop) {
358 			ino = sbinfo->next_ino++;
359 			if (unlikely(is_zero_ino(ino)))
360 				ino = sbinfo->next_ino++;
361 			if (unlikely(!sbinfo->full_inums &&
362 				     ino > UINT_MAX)) {
363 				/*
364 				 * Emulate get_next_ino uint wraparound for
365 				 * compatibility
366 				 */
367 				if (IS_ENABLED(CONFIG_64BIT))
368 					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
369 						__func__, MINOR(sb->s_dev));
370 				sbinfo->next_ino = 1;
371 				ino = sbinfo->next_ino++;
372 			}
373 			*inop = ino;
374 		}
375 		raw_spin_unlock(&sbinfo->stat_lock);
376 	} else if (inop) {
377 		/*
378 		 * __shmem_file_setup, one of our callers, is lock-free: it
379 		 * doesn't hold stat_lock in shmem_reserve_inode since
380 		 * max_inodes is always 0, and is called from potentially
381 		 * unknown contexts. As such, use a per-cpu batched allocator
382 		 * which doesn't require the per-sb stat_lock unless we are at
383 		 * the batch boundary.
384 		 *
385 		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
386 		 * shmem mounts are not exposed to userspace, so we don't need
387 		 * to worry about things like glibc compatibility.
388 		 */
389 		ino_t *next_ino;
390 
391 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
392 		ino = *next_ino;
393 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
394 			raw_spin_lock(&sbinfo->stat_lock);
395 			ino = sbinfo->next_ino;
396 			sbinfo->next_ino += SHMEM_INO_BATCH;
397 			raw_spin_unlock(&sbinfo->stat_lock);
398 			if (unlikely(is_zero_ino(ino)))
399 				ino++;
400 		}
401 		*inop = ino;
402 		*next_ino = ++ino;
403 		put_cpu();
404 	}
405 
406 	return 0;
407 }
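/*
 * Illustrative walk-through (editor's note): on a SB_KERNMOUNT superblock,
 * a CPU whose per-cpu next_ino sits on a SHMEM_INO_BATCH boundary takes
 * stat_lock once, claims the next batch of 1024 inode numbers from
 * sbinfo->next_ino (stepping over a zero ino if one lands there), and then
 * serves the rest of that batch from its per-cpu counter without touching
 * stat_lock again.
 */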
408 
409 static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
410 {
411 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
412 	if (sbinfo->max_inodes) {
413 		raw_spin_lock(&sbinfo->stat_lock);
414 		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
415 		raw_spin_unlock(&sbinfo->stat_lock);
416 	}
417 }
418 
419 /**
420  * shmem_recalc_inode - recalculate the block usage of an inode
421  * @inode: inode to recalc
422  * @alloced: the change in number of pages allocated to inode
423  * @swapped: the change in number of pages swapped from inode
424  *
425  * We have to calculate the free blocks since the mm can drop
426  * undirtied hole pages behind our back.
427  *
428  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
429  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
430  */
431 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
432 {
433 	struct shmem_inode_info *info = SHMEM_I(inode);
434 	long freed;
435 
436 	spin_lock(&info->lock);
437 	info->alloced += alloced;
438 	info->swapped += swapped;
439 	freed = info->alloced - info->swapped -
440 		READ_ONCE(inode->i_mapping->nrpages);
441 	/*
442 	 * Special case: whereas normally shmem_recalc_inode() is called
443 	 * after i_mapping->nrpages has already been adjusted (up or down),
444 	 * shmem_writepage() has to raise swapped before nrpages is lowered -
445 	 * to stop a racing shmem_recalc_inode() from thinking that a page has
446 	 * been freed.  Compensate here, to avoid the need for a followup call.
447 	 */
448 	if (swapped > 0)
449 		freed += swapped;
450 	if (freed > 0)
451 		info->alloced -= freed;
452 	spin_unlock(&info->lock);
453 
454 	/* The quota case may block */
455 	if (freed > 0)
456 		shmem_inode_unacct_blocks(inode, freed);
457 }
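/*
 * Worked example (editor's note, figures are hypothetical): if info->alloced
 * is 10, info->swapped is 2, and i_mapping->nrpages has dropped to 6 because
 * reclaim dropped two clean hole pages behind our back, then freed is
 * 10 - 2 - 6 = 2, info->alloced is pulled back to 8, and two blocks are
 * returned via shmem_inode_unacct_blocks().
 */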
458 
459 bool shmem_charge(struct inode *inode, long pages)
460 {
461 	struct address_space *mapping = inode->i_mapping;
462 
463 	if (shmem_inode_acct_blocks(inode, pages))
464 		return false;
465 
466 	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
467 	xa_lock_irq(&mapping->i_pages);
468 	mapping->nrpages += pages;
469 	xa_unlock_irq(&mapping->i_pages);
470 
471 	shmem_recalc_inode(inode, pages, 0);
472 	return true;
473 }
474 
475 void shmem_uncharge(struct inode *inode, long pages)
476 {
477 	/* pages argument is currently unused: keep it to help debugging */
478 	/* nrpages adjustment done by __filemap_remove_folio() or caller */
479 
480 	shmem_recalc_inode(inode, 0, 0);
481 }
482 
483 /*
484  * Replace item expected in xarray by a new item, while holding xa_lock.
485  */
486 static int shmem_replace_entry(struct address_space *mapping,
487 			pgoff_t index, void *expected, void *replacement)
488 {
489 	XA_STATE(xas, &mapping->i_pages, index);
490 	void *item;
491 
492 	VM_BUG_ON(!expected);
493 	VM_BUG_ON(!replacement);
494 	item = xas_load(&xas);
495 	if (item != expected)
496 		return -ENOENT;
497 	xas_store(&xas, replacement);
498 	return 0;
499 }
500 
501 /*
502  * Sometimes, before we decide whether to proceed or to fail, we must check
503  * that an entry was not already brought back from swap by a racing thread.
504  *
505  * Checking folio is not enough: by the time a swapcache folio is locked, it
506  * might be reused, and again be swapcache, using the same swap as before.
507  */
508 static bool shmem_confirm_swap(struct address_space *mapping,
509 			       pgoff_t index, swp_entry_t swap)
510 {
511 	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
512 }
513 
514 /*
515  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
516  *
517  * SHMEM_HUGE_NEVER:
518  *	disables huge pages for the mount;
519  * SHMEM_HUGE_ALWAYS:
520  *	enables huge pages for the mount;
521  * SHMEM_HUGE_WITHIN_SIZE:
522  *	only allocate huge pages if the page will be fully within i_size,
523  *	also respect fadvise()/madvise() hints;
524  * SHMEM_HUGE_ADVISE:
525  *	only allocate huge pages if requested with fadvise()/madvise();
526  */
527 
528 #define SHMEM_HUGE_NEVER	0
529 #define SHMEM_HUGE_ALWAYS	1
530 #define SHMEM_HUGE_WITHIN_SIZE	2
531 #define SHMEM_HUGE_ADVISE	3
532 
533 /*
534  * Special values.
535  * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
536  *
537  * SHMEM_HUGE_DENY:
538  *	disables huge on shm_mnt and all mounts, for emergency use;
539  * SHMEM_HUGE_FORCE:
540  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
541  *
542  */
543 #define SHMEM_HUGE_DENY		(-1)
544 #define SHMEM_HUGE_FORCE	(-2)
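/*
 * Usage illustration (editor's note): the first four values correspond to
 * the tmpfs "huge=" mount option, e.g. "mount -t tmpfs -o huge=within_size
 * tmpfs /mnt", while "deny" and "force" can only be written to
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled to override every mount
 * at once.
 */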
545 
546 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
547 /* ifdef here to avoid bloating shmem.o when not necessary */
548 
549 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
550 
551 static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
552 					loff_t write_end, bool shmem_huge_force,
553 					struct vm_area_struct *vma,
554 					unsigned long vm_flags)
555 {
556 	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
557 	loff_t i_size;
558 
559 	if (!S_ISREG(inode->i_mode))
560 		return false;
561 	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
562 		return false;
563 	if (shmem_huge == SHMEM_HUGE_DENY)
564 		return false;
565 	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
566 		return true;
567 
568 	switch (SHMEM_SB(inode->i_sb)->huge) {
569 	case SHMEM_HUGE_ALWAYS:
570 		return true;
571 	case SHMEM_HUGE_WITHIN_SIZE:
572 		index = round_up(index + 1, HPAGE_PMD_NR);
573 		i_size = max(write_end, i_size_read(inode));
574 		i_size = round_up(i_size, PAGE_SIZE);
575 		if (i_size >> PAGE_SHIFT >= index)
576 			return true;
577 		fallthrough;
578 	case SHMEM_HUGE_ADVISE:
579 		if (mm && (vm_flags & VM_HUGEPAGE))
580 			return true;
581 		fallthrough;
582 	default:
583 		return false;
584 	}
585 }
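/*
 * Worked example for SHMEM_HUGE_WITHIN_SIZE (editor's note, assuming 4 KiB
 * pages and a 2 MiB PMD, i.e. HPAGE_PMD_NR == 512): a fault at index 100 is
 * rounded up to 512, so a huge page is allowed only when round_up(i_size,
 * PAGE_SIZE) >> PAGE_SHIFT reaches 512, i.e. the file (or the write about to
 * extend it) covers the whole 2 MiB extent containing that index.
 */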
586 
587 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
588 		   loff_t write_end, bool shmem_huge_force,
589 		   struct vm_area_struct *vma, unsigned long vm_flags)
590 {
591 	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
592 		return false;
593 
594 	return __shmem_huge_global_enabled(inode, index, write_end,
595 					   shmem_huge_force, vma, vm_flags);
596 }
597 
598 #if defined(CONFIG_SYSFS)
599 static int shmem_parse_huge(const char *str)
600 {
601 	if (!strcmp(str, "never"))
602 		return SHMEM_HUGE_NEVER;
603 	if (!strcmp(str, "always"))
604 		return SHMEM_HUGE_ALWAYS;
605 	if (!strcmp(str, "within_size"))
606 		return SHMEM_HUGE_WITHIN_SIZE;
607 	if (!strcmp(str, "advise"))
608 		return SHMEM_HUGE_ADVISE;
609 	if (!strcmp(str, "deny"))
610 		return SHMEM_HUGE_DENY;
611 	if (!strcmp(str, "force"))
612 		return SHMEM_HUGE_FORCE;
613 	return -EINVAL;
614 }
615 #endif
616 
617 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
618 static const char *shmem_format_huge(int huge)
619 {
620 	switch (huge) {
621 	case SHMEM_HUGE_NEVER:
622 		return "never";
623 	case SHMEM_HUGE_ALWAYS:
624 		return "always";
625 	case SHMEM_HUGE_WITHIN_SIZE:
626 		return "within_size";
627 	case SHMEM_HUGE_ADVISE:
628 		return "advise";
629 	case SHMEM_HUGE_DENY:
630 		return "deny";
631 	case SHMEM_HUGE_FORCE:
632 		return "force";
633 	default:
634 		VM_BUG_ON(1);
635 		return "bad_val";
636 	}
637 }
638 #endif
639 
640 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
641 		struct shrink_control *sc, unsigned long nr_to_free)
642 {
643 	LIST_HEAD(list), *pos, *next;
644 	struct inode *inode;
645 	struct shmem_inode_info *info;
646 	struct folio *folio;
647 	unsigned long batch = sc ? sc->nr_to_scan : 128;
648 	unsigned long split = 0, freed = 0;
649 
650 	if (list_empty(&sbinfo->shrinklist))
651 		return SHRINK_STOP;
652 
653 	spin_lock(&sbinfo->shrinklist_lock);
654 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
655 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
656 
657 		/* pin the inode */
658 		inode = igrab(&info->vfs_inode);
659 
660 		/* inode is about to be evicted */
661 		if (!inode) {
662 			list_del_init(&info->shrinklist);
663 			goto next;
664 		}
665 
666 		list_move(&info->shrinklist, &list);
667 next:
668 		sbinfo->shrinklist_len--;
669 		if (!--batch)
670 			break;
671 	}
672 	spin_unlock(&sbinfo->shrinklist_lock);
673 
674 	list_for_each_safe(pos, next, &list) {
675 		pgoff_t next, end;
676 		loff_t i_size;
677 		int ret;
678 
679 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
680 		inode = &info->vfs_inode;
681 
682 		if (nr_to_free && freed >= nr_to_free)
683 			goto move_back;
684 
685 		i_size = i_size_read(inode);
686 		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
687 		if (!folio || xa_is_value(folio))
688 			goto drop;
689 
690 		/* No large folio at the end of the file: nothing to split */
691 		if (!folio_test_large(folio)) {
692 			folio_put(folio);
693 			goto drop;
694 		}
695 
696 		/* Check if there is anything to gain from splitting */
697 		next = folio_next_index(folio);
698 		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
699 		if (end <= folio->index || end >= next) {
700 			folio_put(folio);
701 			goto drop;
702 		}
703 
704 		/*
705 		 * Move the inode on the list back to shrinklist if we failed
706 		 * to lock the page at this time.
707 		 *
708 		 * Waiting for the lock may lead to deadlock in the
709 		 * reclaim path.
710 		 */
711 		if (!folio_trylock(folio)) {
712 			folio_put(folio);
713 			goto move_back;
714 		}
715 
716 		ret = split_folio(folio);
717 		folio_unlock(folio);
718 		folio_put(folio);
719 
720 		/* If split failed move the inode on the list back to shrinklist */
721 		if (ret)
722 			goto move_back;
723 
724 		freed += next - end;
725 		split++;
726 drop:
727 		list_del_init(&info->shrinklist);
728 		goto put;
729 move_back:
730 		/*
731 		 * Make sure the inode is either on the global list or deleted
732 		 * from any local list before iput() since it could be deleted
733 		 * in another thread once we put the inode (then the local list
734 		 * is corrupted).
735 		 */
736 		spin_lock(&sbinfo->shrinklist_lock);
737 		list_move(&info->shrinklist, &sbinfo->shrinklist);
738 		sbinfo->shrinklist_len++;
739 		spin_unlock(&sbinfo->shrinklist_lock);
740 put:
741 		iput(inode);
742 	}
743 
744 	return split;
745 }
746 
747 static long shmem_unused_huge_scan(struct super_block *sb,
748 		struct shrink_control *sc)
749 {
750 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
751 
752 	if (!READ_ONCE(sbinfo->shrinklist_len))
753 		return SHRINK_STOP;
754 
755 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
756 }
757 
758 static long shmem_unused_huge_count(struct super_block *sb,
759 		struct shrink_control *sc)
760 {
761 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
762 	return READ_ONCE(sbinfo->shrinklist_len);
763 }
764 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
765 
766 #define shmem_huge SHMEM_HUGE_DENY
767 
768 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
769 		struct shrink_control *sc, unsigned long nr_to_free)
770 {
771 	return 0;
772 }
773 
774 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
775 		loff_t write_end, bool shmem_huge_force,
776 		struct vm_area_struct *vma, unsigned long vm_flags)
777 {
778 	return false;
779 }
780 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
781 
782 /*
783  * Somewhat like filemap_add_folio, but error if expected item has gone.
784  */
785 static int shmem_add_to_page_cache(struct folio *folio,
786 				   struct address_space *mapping,
787 				   pgoff_t index, void *expected, gfp_t gfp)
788 {
789 	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
790 	long nr = folio_nr_pages(folio);
791 
792 	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
793 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
794 	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
795 
796 	folio_ref_add(folio, nr);
797 	folio->mapping = mapping;
798 	folio->index = index;
799 
800 	gfp &= GFP_RECLAIM_MASK;
801 	folio_throttle_swaprate(folio, gfp);
802 
803 	do {
804 		xas_lock_irq(&xas);
805 		if (expected != xas_find_conflict(&xas)) {
806 			xas_set_err(&xas, -EEXIST);
807 			goto unlock;
808 		}
809 		if (expected && xas_find_conflict(&xas)) {
810 			xas_set_err(&xas, -EEXIST);
811 			goto unlock;
812 		}
813 		xas_store(&xas, folio);
814 		if (xas_error(&xas))
815 			goto unlock;
816 		if (folio_test_pmd_mappable(folio))
817 			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
818 		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
819 		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
820 		mapping->nrpages += nr;
821 unlock:
822 		xas_unlock_irq(&xas);
823 	} while (xas_nomem(&xas, gfp));
824 
825 	if (xas_error(&xas)) {
826 		folio->mapping = NULL;
827 		folio_ref_sub(folio, nr);
828 		return xas_error(&xas);
829 	}
830 
831 	return 0;
832 }
833 
834 /*
835  * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
836  */
837 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
838 {
839 	struct address_space *mapping = folio->mapping;
840 	long nr = folio_nr_pages(folio);
841 	int error;
842 
843 	xa_lock_irq(&mapping->i_pages);
844 	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
845 	folio->mapping = NULL;
846 	mapping->nrpages -= nr;
847 	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
848 	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
849 	xa_unlock_irq(&mapping->i_pages);
850 	folio_put_refs(folio, nr);
851 	BUG_ON(error);
852 }
853 
854 /*
855  * Remove swap entry from page cache, free the swap and its page cache.
856  * Returns the number of pages freed; 0 means the entry was not found in the
857  * XArray (so no pages were freed).
858  */
859 static long shmem_free_swap(struct address_space *mapping,
860 			    pgoff_t index, void *radswap)
861 {
862 	int order = xa_get_order(&mapping->i_pages, index);
863 	void *old;
864 
865 	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
866 	if (old != radswap)
867 		return 0;
868 	free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
869 
870 	return 1 << order;
871 }
872 
873 /*
874  * Determine (in bytes) how many of the shmem object's pages mapped by the
875  * given offsets are swapped out.
876  *
877  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
878  * as long as the inode doesn't go away and racy results are not a problem.
879  */
880 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
881 						pgoff_t start, pgoff_t end)
882 {
883 	XA_STATE(xas, &mapping->i_pages, start);
884 	struct page *page;
885 	unsigned long swapped = 0;
886 	unsigned long max = end - 1;
887 
888 	rcu_read_lock();
889 	xas_for_each(&xas, page, max) {
890 		if (xas_retry(&xas, page))
891 			continue;
892 		if (xa_is_value(page))
893 			swapped += 1 << xas_get_order(&xas);
894 		if (xas.xa_index == max)
895 			break;
896 		if (need_resched()) {
897 			xas_pause(&xas);
898 			cond_resched_rcu();
899 		}
900 	}
901 	rcu_read_unlock();
902 
903 	return swapped << PAGE_SHIFT;
904 }
905 
906 /*
907  * Determine (in bytes) how many of the shmem object's pages mapped by the
908  * given vma are swapped out.
909  *
910  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
911  * as long as the inode doesn't go away and racy results are not a problem.
912  */
913 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
914 {
915 	struct inode *inode = file_inode(vma->vm_file);
916 	struct shmem_inode_info *info = SHMEM_I(inode);
917 	struct address_space *mapping = inode->i_mapping;
918 	unsigned long swapped;
919 
920 	/* Be careful as we don't hold info->lock */
921 	swapped = READ_ONCE(info->swapped);
922 
923 	/*
924 	 * The easier cases are when the shmem object has nothing in swap, or
925 	 * the vma maps it whole. Then we can simply use the stats that we
926 	 * already track.
927 	 */
928 	if (!swapped)
929 		return 0;
930 
931 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
932 		return swapped << PAGE_SHIFT;
933 
934 	/* Here comes the more involved part */
935 	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
936 					vma->vm_pgoff + vma_pages(vma));
937 }
938 
939 /*
940  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
941  */
942 void shmem_unlock_mapping(struct address_space *mapping)
943 {
944 	struct folio_batch fbatch;
945 	pgoff_t index = 0;
946 
947 	folio_batch_init(&fbatch);
948 	/*
949 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
950 	 */
951 	while (!mapping_unevictable(mapping) &&
952 	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
953 		check_move_unevictable_folios(&fbatch);
954 		folio_batch_release(&fbatch);
955 		cond_resched();
956 	}
957 }
958 
959 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
960 {
961 	struct folio *folio;
962 
963 	/*
964 	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
965 	 * beyond i_size, and reports fallocated folios as holes.
966 	 */
967 	folio = filemap_get_entry(inode->i_mapping, index);
968 	if (!folio)
969 		return folio;
970 	if (!xa_is_value(folio)) {
971 		folio_lock(folio);
972 		if (folio->mapping == inode->i_mapping)
973 			return folio;
974 		/* The folio has been swapped out */
975 		folio_unlock(folio);
976 		folio_put(folio);
977 	}
978 	/*
979 	 * But read a folio back from swap if any of it is within i_size
980 	 * (although in some cases this is just a waste of time).
981 	 */
982 	folio = NULL;
983 	shmem_get_folio(inode, index, 0, &folio, SGP_READ);
984 	return folio;
985 }
986 
987 /*
988  * Remove range of pages and swap entries from page cache, and free them.
989  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
990  */
991 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
992 								 bool unfalloc)
993 {
994 	struct address_space *mapping = inode->i_mapping;
995 	struct shmem_inode_info *info = SHMEM_I(inode);
996 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
997 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
998 	struct folio_batch fbatch;
999 	pgoff_t indices[PAGEVEC_SIZE];
1000 	struct folio *folio;
1001 	bool same_folio;
1002 	long nr_swaps_freed = 0;
1003 	pgoff_t index;
1004 	int i;
1005 
1006 	if (lend == -1)
1007 		end = -1;	/* unsigned, so actually very big */
1008 
1009 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
1010 		info->fallocend = start;
1011 
1012 	folio_batch_init(&fbatch);
1013 	index = start;
1014 	while (index < end && find_lock_entries(mapping, &index, end - 1,
1015 			&fbatch, indices)) {
1016 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1017 			folio = fbatch.folios[i];
1018 
1019 			if (xa_is_value(folio)) {
1020 				if (unfalloc)
1021 					continue;
1022 				nr_swaps_freed += shmem_free_swap(mapping,
1023 							indices[i], folio);
1024 				continue;
1025 			}
1026 
1027 			if (!unfalloc || !folio_test_uptodate(folio))
1028 				truncate_inode_folio(mapping, folio);
1029 			folio_unlock(folio);
1030 		}
1031 		folio_batch_remove_exceptionals(&fbatch);
1032 		folio_batch_release(&fbatch);
1033 		cond_resched();
1034 	}
1035 
1036 	/*
1037 	 * When undoing a failed fallocate, we want none of the partial folio
1038 	 * zeroing and splitting below, but shall want to truncate the whole
1039 	 * folio when !uptodate indicates that it was added by this fallocate,
1040 	 * even when [lstart, lend] covers only a part of the folio.
1041 	 */
1042 	if (unfalloc)
1043 		goto whole_folios;
1044 
1045 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1046 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1047 	if (folio) {
1048 		same_folio = lend < folio_pos(folio) + folio_size(folio);
1049 		folio_mark_dirty(folio);
1050 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1051 			start = folio_next_index(folio);
1052 			if (same_folio)
1053 				end = folio->index;
1054 		}
1055 		folio_unlock(folio);
1056 		folio_put(folio);
1057 		folio = NULL;
1058 	}
1059 
1060 	if (!same_folio)
1061 		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1062 	if (folio) {
1063 		folio_mark_dirty(folio);
1064 		if (!truncate_inode_partial_folio(folio, lstart, lend))
1065 			end = folio->index;
1066 		folio_unlock(folio);
1067 		folio_put(folio);
1068 	}
1069 
1070 whole_folios:
1071 
1072 	index = start;
1073 	while (index < end) {
1074 		cond_resched();
1075 
1076 		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1077 				indices)) {
1078 			/* If all gone or hole-punch or unfalloc, we're done */
1079 			if (index == start || end != -1)
1080 				break;
1081 			/* But if truncating, restart to make sure all gone */
1082 			index = start;
1083 			continue;
1084 		}
1085 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1086 			folio = fbatch.folios[i];
1087 
1088 			if (xa_is_value(folio)) {
1089 				long swaps_freed;
1090 
1091 				if (unfalloc)
1092 					continue;
1093 				swaps_freed = shmem_free_swap(mapping, indices[i], folio);
1094 				if (!swaps_freed) {
1095 					/* Swap was replaced by page: retry */
1096 					index = indices[i];
1097 					break;
1098 				}
1099 				nr_swaps_freed += swaps_freed;
1100 				continue;
1101 			}
1102 
1103 			folio_lock(folio);
1104 
1105 			if (!unfalloc || !folio_test_uptodate(folio)) {
1106 				if (folio_mapping(folio) != mapping) {
1107 					/* Page was replaced by swap: retry */
1108 					folio_unlock(folio);
1109 					index = indices[i];
1110 					break;
1111 				}
1112 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1113 						folio);
1114 
1115 				if (!folio_test_large(folio)) {
1116 					truncate_inode_folio(mapping, folio);
1117 				} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1118 					/*
1119 					 * If we split a page, reset the loop so
1120 					 * that we pick up the new sub pages.
1121 					 * Otherwise the THP was entirely
1122 					 * dropped or the target range was
1123 					 * zeroed, so just continue the loop as
1124 					 * is.
1125 					 */
1126 					if (!folio_test_large(folio)) {
1127 						folio_unlock(folio);
1128 						index = start;
1129 						break;
1130 					}
1131 				}
1132 			}
1133 			folio_unlock(folio);
1134 		}
1135 		folio_batch_remove_exceptionals(&fbatch);
1136 		folio_batch_release(&fbatch);
1137 	}
1138 
1139 	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1140 }
1141 
1142 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1143 {
1144 	shmem_undo_range(inode, lstart, lend, false);
1145 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1146 	inode_inc_iversion(inode);
1147 }
1148 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1149 
1150 static int shmem_getattr(struct mnt_idmap *idmap,
1151 			 const struct path *path, struct kstat *stat,
1152 			 u32 request_mask, unsigned int query_flags)
1153 {
1154 	struct inode *inode = path->dentry->d_inode;
1155 	struct shmem_inode_info *info = SHMEM_I(inode);
1156 
1157 	if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1158 		shmem_recalc_inode(inode, 0, 0);
1159 
1160 	if (info->fsflags & FS_APPEND_FL)
1161 		stat->attributes |= STATX_ATTR_APPEND;
1162 	if (info->fsflags & FS_IMMUTABLE_FL)
1163 		stat->attributes |= STATX_ATTR_IMMUTABLE;
1164 	if (info->fsflags & FS_NODUMP_FL)
1165 		stat->attributes |= STATX_ATTR_NODUMP;
1166 	stat->attributes_mask |= (STATX_ATTR_APPEND |
1167 			STATX_ATTR_IMMUTABLE |
1168 			STATX_ATTR_NODUMP);
1169 	generic_fillattr(idmap, request_mask, inode, stat);
1170 
1171 	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
1172 		stat->blksize = HPAGE_PMD_SIZE;
1173 
1174 	if (request_mask & STATX_BTIME) {
1175 		stat->result_mask |= STATX_BTIME;
1176 		stat->btime.tv_sec = info->i_crtime.tv_sec;
1177 		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1178 	}
1179 
1180 	return 0;
1181 }
1182 
1183 static int shmem_setattr(struct mnt_idmap *idmap,
1184 			 struct dentry *dentry, struct iattr *attr)
1185 {
1186 	struct inode *inode = d_inode(dentry);
1187 	struct shmem_inode_info *info = SHMEM_I(inode);
1188 	int error;
1189 	bool update_mtime = false;
1190 	bool update_ctime = true;
1191 
1192 	error = setattr_prepare(idmap, dentry, attr);
1193 	if (error)
1194 		return error;
1195 
1196 	if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1197 		if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1198 			return -EPERM;
1199 		}
1200 	}
1201 
1202 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1203 		loff_t oldsize = inode->i_size;
1204 		loff_t newsize = attr->ia_size;
1205 
1206 		/* protected by i_rwsem */
1207 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1208 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1209 			return -EPERM;
1210 
1211 		if (newsize != oldsize) {
1212 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
1213 					oldsize, newsize);
1214 			if (error)
1215 				return error;
1216 			i_size_write(inode, newsize);
1217 			update_mtime = true;
1218 		} else {
1219 			update_ctime = false;
1220 		}
1221 		if (newsize <= oldsize) {
1222 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1223 			if (oldsize > holebegin)
1224 				unmap_mapping_range(inode->i_mapping,
1225 							holebegin, 0, 1);
1226 			if (info->alloced)
1227 				shmem_truncate_range(inode,
1228 							newsize, (loff_t)-1);
1229 			/* unmap again to remove racily COWed private pages */
1230 			if (oldsize > holebegin)
1231 				unmap_mapping_range(inode->i_mapping,
1232 							holebegin, 0, 1);
1233 		}
1234 	}
1235 
1236 	if (is_quota_modification(idmap, inode, attr)) {
1237 		error = dquot_initialize(inode);
1238 		if (error)
1239 			return error;
1240 	}
1241 
1242 	/* Transfer quota accounting */
1243 	if (i_uid_needs_update(idmap, attr, inode) ||
1244 	    i_gid_needs_update(idmap, attr, inode)) {
1245 		error = dquot_transfer(idmap, inode, attr);
1246 		if (error)
1247 			return error;
1248 	}
1249 
1250 	setattr_copy(idmap, inode, attr);
1251 	if (attr->ia_valid & ATTR_MODE)
1252 		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1253 	if (!error && update_ctime) {
1254 		inode_set_ctime_current(inode);
1255 		if (update_mtime)
1256 			inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1257 		inode_inc_iversion(inode);
1258 	}
1259 	return error;
1260 }
1261 
1262 static void shmem_evict_inode(struct inode *inode)
1263 {
1264 	struct shmem_inode_info *info = SHMEM_I(inode);
1265 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1266 	size_t freed = 0;
1267 
1268 	if (shmem_mapping(inode->i_mapping)) {
1269 		shmem_unacct_size(info->flags, inode->i_size);
1270 		inode->i_size = 0;
1271 		mapping_set_exiting(inode->i_mapping);
1272 		shmem_truncate_range(inode, 0, (loff_t)-1);
1273 		if (!list_empty(&info->shrinklist)) {
1274 			spin_lock(&sbinfo->shrinklist_lock);
1275 			if (!list_empty(&info->shrinklist)) {
1276 				list_del_init(&info->shrinklist);
1277 				sbinfo->shrinklist_len--;
1278 			}
1279 			spin_unlock(&sbinfo->shrinklist_lock);
1280 		}
1281 		while (!list_empty(&info->swaplist)) {
1282 			/* Wait while shmem_unuse() is scanning this inode... */
1283 			wait_var_event(&info->stop_eviction,
1284 				       !atomic_read(&info->stop_eviction));
1285 			mutex_lock(&shmem_swaplist_mutex);
1286 			/* ...but beware of the race if we peeked too early */
1287 			if (!atomic_read(&info->stop_eviction))
1288 				list_del_init(&info->swaplist);
1289 			mutex_unlock(&shmem_swaplist_mutex);
1290 		}
1291 	}
1292 
1293 	simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1294 	shmem_free_inode(inode->i_sb, freed);
1295 	WARN_ON(inode->i_blocks);
1296 	clear_inode(inode);
1297 #ifdef CONFIG_TMPFS_QUOTA
1298 	dquot_free_inode(inode);
1299 	dquot_drop(inode);
1300 #endif
1301 }
1302 
1303 static int shmem_find_swap_entries(struct address_space *mapping,
1304 				   pgoff_t start, struct folio_batch *fbatch,
1305 				   pgoff_t *indices, unsigned int type)
1306 {
1307 	XA_STATE(xas, &mapping->i_pages, start);
1308 	struct folio *folio;
1309 	swp_entry_t entry;
1310 
1311 	rcu_read_lock();
1312 	xas_for_each(&xas, folio, ULONG_MAX) {
1313 		if (xas_retry(&xas, folio))
1314 			continue;
1315 
1316 		if (!xa_is_value(folio))
1317 			continue;
1318 
1319 		entry = radix_to_swp_entry(folio);
1320 		/*
1321 		 * swapin error entries can be found in the mapping. But they're
1322 		 * deliberately ignored here as we've done everything we can do.
1323 		 */
1324 		if (swp_type(entry) != type)
1325 			continue;
1326 
1327 		indices[folio_batch_count(fbatch)] = xas.xa_index;
1328 		if (!folio_batch_add(fbatch, folio))
1329 			break;
1330 
1331 		if (need_resched()) {
1332 			xas_pause(&xas);
1333 			cond_resched_rcu();
1334 		}
1335 	}
1336 	rcu_read_unlock();
1337 
1338 	return xas.xa_index;
1339 }
1340 
1341 /*
1342  * Move the swapped pages for an inode to page cache. Returns the count
1343  * of pages swapped in, or the error in case of failure.
1344  */
1345 static int shmem_unuse_swap_entries(struct inode *inode,
1346 		struct folio_batch *fbatch, pgoff_t *indices)
1347 {
1348 	int i = 0;
1349 	int ret = 0;
1350 	int error = 0;
1351 	struct address_space *mapping = inode->i_mapping;
1352 
1353 	for (i = 0; i < folio_batch_count(fbatch); i++) {
1354 		struct folio *folio = fbatch->folios[i];
1355 
1356 		if (!xa_is_value(folio))
1357 			continue;
1358 		error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1359 					mapping_gfp_mask(mapping), NULL, NULL);
1360 		if (error == 0) {
1361 			folio_unlock(folio);
1362 			folio_put(folio);
1363 			ret++;
1364 		}
1365 		if (error == -ENOMEM)
1366 			break;
1367 		error = 0;
1368 	}
1369 	return error ? error : ret;
1370 }
1371 
1372 /*
1373  * If swap found in inode, free it and move page from swapcache to filecache.
1374  */
1375 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1376 {
1377 	struct address_space *mapping = inode->i_mapping;
1378 	pgoff_t start = 0;
1379 	struct folio_batch fbatch;
1380 	pgoff_t indices[PAGEVEC_SIZE];
1381 	int ret = 0;
1382 
1383 	do {
1384 		folio_batch_init(&fbatch);
1385 		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
1386 		if (folio_batch_count(&fbatch) == 0) {
1387 			ret = 0;
1388 			break;
1389 		}
1390 
1391 		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1392 		if (ret < 0)
1393 			break;
1394 
1395 		start = indices[folio_batch_count(&fbatch) - 1];
1396 	} while (true);
1397 
1398 	return ret;
1399 }
1400 
1401 /*
1402  * Read all the shared memory data that resides in the swap
1403  * device 'type' back into memory, so the swap device can be
1404  * unused.
1405  */
1406 int shmem_unuse(unsigned int type)
1407 {
1408 	struct shmem_inode_info *info, *next;
1409 	int error = 0;
1410 
1411 	if (list_empty(&shmem_swaplist))
1412 		return 0;
1413 
1414 	mutex_lock(&shmem_swaplist_mutex);
1415 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1416 		if (!info->swapped) {
1417 			list_del_init(&info->swaplist);
1418 			continue;
1419 		}
1420 		/*
1421 		 * Drop the swaplist mutex while searching the inode for swap;
1422 		 * but before doing so, make sure shmem_evict_inode() will not
1423 		 * remove placeholder inode from swaplist, nor let it be freed
1424 		 * (igrab() would protect from unlink, but not from unmount).
1425 		 */
1426 		atomic_inc(&info->stop_eviction);
1427 		mutex_unlock(&shmem_swaplist_mutex);
1428 
1429 		error = shmem_unuse_inode(&info->vfs_inode, type);
1430 		cond_resched();
1431 
1432 		mutex_lock(&shmem_swaplist_mutex);
1433 		next = list_next_entry(info, swaplist);
1434 		if (!info->swapped)
1435 			list_del_init(&info->swaplist);
1436 		if (atomic_dec_and_test(&info->stop_eviction))
1437 			wake_up_var(&info->stop_eviction);
1438 		if (error)
1439 			break;
1440 	}
1441 	mutex_unlock(&shmem_swaplist_mutex);
1442 
1443 	return error;
1444 }
1445 
1446 /*
1447  * Move the page from the page cache to the swap cache.
1448  */
1449 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1450 {
1451 	struct folio *folio = page_folio(page);
1452 	struct address_space *mapping = folio->mapping;
1453 	struct inode *inode = mapping->host;
1454 	struct shmem_inode_info *info = SHMEM_I(inode);
1455 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1456 	swp_entry_t swap;
1457 	pgoff_t index;
1458 	int nr_pages;
1459 	bool split = false;
1460 
1461 	/*
1462 	 * Our capabilities prevent regular writeback or sync from ever calling
1463 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
1464 	 * its underlying filesystem, in which case tmpfs should write out to
1465 	 * swap only in response to memory pressure, and not for the writeback
1466 	 * threads or sync.
1467 	 */
1468 	if (WARN_ON_ONCE(!wbc->for_reclaim))
1469 		goto redirty;
1470 
1471 	if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
1472 		goto redirty;
1473 
1474 	if (!total_swap_pages)
1475 		goto redirty;
1476 
1477 	/*
1478 	 * If CONFIG_THP_SWAP is not enabled, the large folio should be
1479 	 * split when swapping.
1480 	 *
1481 	 * And shrinkage of pages beyond i_size does not split swap, so
1482 	 * swapout of a large folio crossing i_size needs to split too
1483 	 * (unless fallocate has been used to preallocate beyond EOF).
1484 	 */
1485 	if (folio_test_large(folio)) {
1486 		index = shmem_fallocend(inode,
1487 			DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
1488 		if ((index > folio->index && index < folio_next_index(folio)) ||
1489 		    !IS_ENABLED(CONFIG_THP_SWAP))
1490 			split = true;
1491 	}
1492 
1493 	if (split) {
1494 try_split:
1495 		/* Ensure the subpages are still dirty */
1496 		folio_test_set_dirty(folio);
1497 		if (split_huge_page_to_list_to_order(page, wbc->list, 0))
1498 			goto redirty;
1499 		folio = page_folio(page);
1500 		folio_clear_dirty(folio);
1501 	}
1502 
1503 	index = folio->index;
1504 	nr_pages = folio_nr_pages(folio);
1505 
1506 	/*
1507 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1508 	 * value into swapfile.c, the only way we can correctly account for a
1509 	 * fallocated folio arriving here is now to initialize it and write it.
1510 	 *
1511 	 * That's okay for a folio already fallocated earlier, but if we have
1512 	 * not yet completed the fallocation, then (a) we want to keep track
1513 	 * of this folio in case we have to undo it, and (b) it may not be a
1514 	 * good idea to continue anyway, once we're pushing into swap.  So
1515 	 * reactivate the folio, and let shmem_fallocate() quit when too many.
1516 	 */
1517 	if (!folio_test_uptodate(folio)) {
1518 		if (inode->i_private) {
1519 			struct shmem_falloc *shmem_falloc;
1520 			spin_lock(&inode->i_lock);
1521 			shmem_falloc = inode->i_private;
1522 			if (shmem_falloc &&
1523 			    !shmem_falloc->waitq &&
1524 			    index >= shmem_falloc->start &&
1525 			    index < shmem_falloc->next)
1526 				shmem_falloc->nr_unswapped++;
1527 			else
1528 				shmem_falloc = NULL;
1529 			spin_unlock(&inode->i_lock);
1530 			if (shmem_falloc)
1531 				goto redirty;
1532 		}
1533 		folio_zero_range(folio, 0, folio_size(folio));
1534 		flush_dcache_folio(folio);
1535 		folio_mark_uptodate(folio);
1536 	}
1537 
1538 	swap = folio_alloc_swap(folio);
1539 	if (!swap.val) {
1540 		if (nr_pages > 1)
1541 			goto try_split;
1542 
1543 		goto redirty;
1544 	}
1545 
1546 	/*
1547 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1548 	 * if it's not already there.  Do it now before the folio is
1549 	 * moved to swap cache, when its pagelock no longer protects
1550 	 * the inode from eviction.  But don't unlock the mutex until
1551 	 * we've incremented swapped, because shmem_unuse_inode() will
1552 	 * prune a !swapped inode from the swaplist under this mutex.
1553 	 */
1554 	mutex_lock(&shmem_swaplist_mutex);
1555 	if (list_empty(&info->swaplist))
1556 		list_add(&info->swaplist, &shmem_swaplist);
1557 
1558 	if (add_to_swap_cache(folio, swap,
1559 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1560 			NULL) == 0) {
1561 		shmem_recalc_inode(inode, 0, nr_pages);
1562 		swap_shmem_alloc(swap, nr_pages);
1563 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
1564 
1565 		mutex_unlock(&shmem_swaplist_mutex);
1566 		BUG_ON(folio_mapped(folio));
1567 		return swap_writepage(&folio->page, wbc);
1568 	}
1569 
1570 	mutex_unlock(&shmem_swaplist_mutex);
1571 	put_swap_folio(folio, swap);
1572 redirty:
1573 	folio_mark_dirty(folio);
1574 	if (wbc->for_reclaim)
1575 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
1576 	folio_unlock(folio);
1577 	return 0;
1578 }
1579 
1580 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1581 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1582 {
1583 	char buffer[64];
1584 
1585 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1586 		return;		/* show nothing */
1587 
1588 	mpol_to_str(buffer, sizeof(buffer), mpol);
1589 
1590 	seq_printf(seq, ",mpol=%s", buffer);
1591 }
1592 
1593 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1594 {
1595 	struct mempolicy *mpol = NULL;
1596 	if (sbinfo->mpol) {
1597 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1598 		mpol = sbinfo->mpol;
1599 		mpol_get(mpol);
1600 		raw_spin_unlock(&sbinfo->stat_lock);
1601 	}
1602 	return mpol;
1603 }
1604 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1605 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1606 {
1607 }
1608 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1609 {
1610 	return NULL;
1611 }
1612 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1613 
1614 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1615 			pgoff_t index, unsigned int order, pgoff_t *ilx);
1616 
1617 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1618 			struct shmem_inode_info *info, pgoff_t index)
1619 {
1620 	struct mempolicy *mpol;
1621 	pgoff_t ilx;
1622 	struct folio *folio;
1623 
1624 	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1625 	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1626 	mpol_cond_put(mpol);
1627 
1628 	return folio;
1629 }
1630 
1631 /*
1632  * Make sure huge_gfp is always more limited than limit_gfp.
1633  * Some of the flags set permissions, while others set limitations.
1634  */
1635 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1636 {
1637 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1638 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1639 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1640 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1641 
1642 	/* Allow allocations only from the originally specified zones. */
1643 	result |= zoneflags;
1644 
1645 	/*
1646 	 * Minimize the result gfp by taking the union with the deny flags,
1647 	 * and the intersection of the allow flags.
1648 	 */
1649 	result |= (limit_gfp & denyflags);
1650 	result |= (huge_gfp & limit_gfp) & allowflags;
1651 
1652 	return result;
1653 }
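/*
 * Worked example (editor's note, illustrative): if huge_gfp is GFP_TRANSHUGE
 * (which includes __GFP_IO and __GFP_FS) but limit_gfp is GFP_NOFS, the
 * allow-flag intersection drops __GFP_FS from the result; and any deny flags
 * such as __GFP_NORETRY or __GFP_NOWARN present in limit_gfp are added, so
 * the huge allocation can never be more aggressive than the caller's
 * original mask permitted.
 */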
1654 
1655 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1656 unsigned long shmem_allowable_huge_orders(struct inode *inode,
1657 				struct vm_area_struct *vma, pgoff_t index,
1658 				loff_t write_end, bool shmem_huge_force)
1659 {
1660 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
1661 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
1662 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
1663 	bool global_huge;
1664 	loff_t i_size;
1665 	int order;
1666 
1667 	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
1668 		return 0;
1669 
1670 	global_huge = shmem_huge_global_enabled(inode, index, write_end,
1671 					shmem_huge_force, vma, vm_flags);
1672 	if (!vma || !vma_is_anon_shmem(vma)) {
1673 		/*
1674 		 * For tmpfs, we now only support PMD sized THP if huge page
1675 		 * is enabled, otherwise fallback to order 0.
1676 		 */
1677 		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
1678 	}
1679 
1680 	/*
1681 	 * Following the 'deny' semantics of the top level, force the huge
1682 	 * option off from all mounts.
1683 	 */
1684 	if (shmem_huge == SHMEM_HUGE_DENY)
1685 		return 0;
1686 
1687 	/*
1688 	 * Only allow inherit orders if the top-level value is 'force', which
1689 	 * means non-PMD sized THP can not override 'huge' mount option now.
1690 	 */
1691 	if (shmem_huge == SHMEM_HUGE_FORCE)
1692 		return READ_ONCE(huge_shmem_orders_inherit);
1693 
1694 	/* Allow mTHP that will be fully within i_size. */
1695 	order = highest_order(within_size_orders);
1696 	while (within_size_orders) {
1697 		index = round_up(index + 1, order);
1698 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1699 		if (i_size >> PAGE_SHIFT >= index) {
1700 			mask |= within_size_orders;
1701 			break;
1702 		}
1703 
1704 		order = next_order(&within_size_orders, order);
1705 	}
1706 
1707 	if (vm_flags & VM_HUGEPAGE)
1708 		mask |= READ_ONCE(huge_shmem_orders_madvise);
1709 
1710 	if (global_huge)
1711 		mask |= READ_ONCE(huge_shmem_orders_inherit);
1712 
1713 	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
1714 }
1715 
1716 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1717 					   struct address_space *mapping, pgoff_t index,
1718 					   unsigned long orders)
1719 {
1720 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1721 	pgoff_t aligned_index;
1722 	unsigned long pages;
1723 	int order;
1724 
1725 	if (vma) {
1726 		orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1727 		if (!orders)
1728 			return 0;
1729 	}
1730 
1731 	/* Find the highest order that can add into the page cache */
1732 	order = highest_order(orders);
1733 	while (orders) {
1734 		pages = 1UL << order;
1735 		aligned_index = round_down(index, pages);
1736 		/*
1737 		 * Check for conflict before waiting on a huge allocation.
1738 		 * Conflict might be that a huge page has just been allocated
1739 		 * and added to page cache by a racing thread, or that there
1740 		 * is already at least one small page in the huge extent.
1741 		 * Be careful to retry when appropriate, but not forever!
1742 		 * Elsewhere -EEXIST would be the right code, but not here.
1743 		 */
1744 		if (!xa_find(&mapping->i_pages, &aligned_index,
1745 			     aligned_index + pages - 1, XA_PRESENT))
1746 			break;
1747 		order = next_order(&orders, order);
1748 	}
1749 
1750 	return orders;
1751 }
1752 #else
1753 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1754 					   struct address_space *mapping, pgoff_t index,
1755 					   unsigned long orders)
1756 {
1757 	return 0;
1758 }
1759 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1760 
1761 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1762 		struct shmem_inode_info *info, pgoff_t index)
1763 {
1764 	struct mempolicy *mpol;
1765 	pgoff_t ilx;
1766 	struct folio *folio;
1767 
1768 	mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1769 	folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1770 	mpol_cond_put(mpol);
1771 
1772 	return folio;
1773 }
1774 
1775 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1776 		gfp_t gfp, struct inode *inode, pgoff_t index,
1777 		struct mm_struct *fault_mm, unsigned long orders)
1778 {
1779 	struct address_space *mapping = inode->i_mapping;
1780 	struct shmem_inode_info *info = SHMEM_I(inode);
1781 	unsigned long suitable_orders = 0;
1782 	struct folio *folio = NULL;
1783 	long pages;
1784 	int error, order;
1785 
1786 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1787 		orders = 0;
1788 
1789 	if (orders > 0) {
1790 		suitable_orders = shmem_suitable_orders(inode, vmf,
1791 							mapping, index, orders);
1792 
1793 		order = highest_order(suitable_orders);
1794 		while (suitable_orders) {
1795 			pages = 1UL << order;
1796 			index = round_down(index, pages);
1797 			folio = shmem_alloc_folio(gfp, order, info, index);
1798 			if (folio)
1799 				goto allocated;
1800 
1801 			if (pages == HPAGE_PMD_NR)
1802 				count_vm_event(THP_FILE_FALLBACK);
1803 			count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1804 			order = next_order(&suitable_orders, order);
1805 		}
1806 	} else {
1807 		pages = 1;
1808 		folio = shmem_alloc_folio(gfp, 0, info, index);
1809 	}
1810 	if (!folio)
1811 		return ERR_PTR(-ENOMEM);
1812 
1813 allocated:
1814 	__folio_set_locked(folio);
1815 	__folio_set_swapbacked(folio);
1816 
1817 	gfp &= GFP_RECLAIM_MASK;
1818 	error = mem_cgroup_charge(folio, fault_mm, gfp);
1819 	if (error) {
1820 		if (xa_find(&mapping->i_pages, &index,
1821 				index + pages - 1, XA_PRESENT)) {
1822 			error = -EEXIST;
1823 		} else if (pages > 1) {
1824 			if (pages == HPAGE_PMD_NR) {
1825 				count_vm_event(THP_FILE_FALLBACK);
1826 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
1827 			}
1828 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1829 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1830 		}
1831 		goto unlock;
1832 	}
1833 
1834 	error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1835 	if (error)
1836 		goto unlock;
1837 
1838 	error = shmem_inode_acct_blocks(inode, pages);
1839 	if (error) {
1840 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1841 		long freed;
1842 		/*
1843 		 * Try to reclaim some space by splitting a few
1844 		 * large folios beyond i_size on the filesystem.
1845 		 */
1846 		shmem_unused_huge_shrink(sbinfo, NULL, pages);
1847 		/*
1848 		 * And do a shmem_recalc_inode() to account for freed pages:
1849 		 * except our folio is there in cache, so not quite balanced.
1850 		 */
1851 		spin_lock(&info->lock);
1852 		freed = pages + info->alloced - info->swapped -
1853 			READ_ONCE(mapping->nrpages);
1854 		if (freed > 0)
1855 			info->alloced -= freed;
1856 		spin_unlock(&info->lock);
1857 		if (freed > 0)
1858 			shmem_inode_unacct_blocks(inode, freed);
1859 		error = shmem_inode_acct_blocks(inode, pages);
1860 		if (error) {
1861 			filemap_remove_folio(folio);
1862 			goto unlock;
1863 		}
1864 	}
1865 
1866 	shmem_recalc_inode(inode, pages, 0);
1867 	folio_add_lru(folio);
1868 	return folio;
1869 
1870 unlock:
1871 	folio_unlock(folio);
1872 	folio_put(folio);
1873 	return ERR_PTR(error);
1874 }
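
/*
 * Illustrative sketch (not part of the original source): the fallback pattern
 * used by shmem_alloc_and_add_folio() above - try the largest suitable order
 * first and step down via next_order() until an allocation succeeds, ending
 * with a single page.  The helper name is hypothetical and statistics are
 * omitted.
 */
static inline struct folio *shmem_alloc_best_order_sketch(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index,
		unsigned long suitable_orders)
{
	int order = highest_order(suitable_orders);

	while (suitable_orders) {
		struct folio *folio;

		folio = shmem_alloc_folio(gfp, order, info,
					  round_down(index, 1UL << order));
		if (folio)
			return folio;
		order = next_order(&suitable_orders, order);
	}
	/* Last resort: an order-0 folio at the original index. */
	return shmem_alloc_folio(gfp, 0, info, index);
}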
1875 
1876 /*
1877  * When a page is moved from swapcache to shmem filecache (either by the
1878  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1879  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1880  * ignorance of the mapping it belongs to.  If that mapping has special
1881  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1882  * we may need to copy to a suitable page before moving to filecache.
1883  *
1884  * In a future release, this may well be extended to respect cpuset and
1885  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1886  * but for now it is a simple matter of zone.
1887  */
1888 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1889 {
1890 	return folio_zonenum(folio) > gfp_zone(gfp);
1891 }
1892 
1893 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1894 				struct shmem_inode_info *info, pgoff_t index,
1895 				struct vm_area_struct *vma)
1896 {
1897 	struct folio *new, *old = *foliop;
1898 	swp_entry_t entry = old->swap;
1899 	struct address_space *swap_mapping = swap_address_space(entry);
1900 	pgoff_t swap_index = swap_cache_index(entry);
1901 	XA_STATE(xas, &swap_mapping->i_pages, swap_index);
1902 	int nr_pages = folio_nr_pages(old);
1903 	int error = 0, i;
1904 
1905 	/*
1906 	 * We have arrived here because our zones are constrained, so don't
1907 	 * limit chance of success by further cpuset and node constraints.
1908 	 */
1909 	gfp &= ~GFP_CONSTRAINT_MASK;
1910 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1911 	if (nr_pages > 1) {
1912 		gfp_t huge_gfp = vma_thp_gfp_mask(vma);
1913 
1914 		gfp = limit_gfp_mask(huge_gfp, gfp);
1915 	}
1916 #endif
1917 
1918 	new = shmem_alloc_folio(gfp, folio_order(old), info, index);
1919 	if (!new)
1920 		return -ENOMEM;
1921 
1922 	folio_ref_add(new, nr_pages);
1923 	folio_copy(new, old);
1924 	flush_dcache_folio(new);
1925 
1926 	__folio_set_locked(new);
1927 	__folio_set_swapbacked(new);
1928 	folio_mark_uptodate(new);
1929 	new->swap = entry;
1930 	folio_set_swapcache(new);
1931 
1932 	/* Swap cache still stores N entries instead of a high-order entry */
1933 	xa_lock_irq(&swap_mapping->i_pages);
1934 	for (i = 0; i < nr_pages; i++) {
1935 		void *item = xas_load(&xas);
1936 
1937 		if (item != old) {
1938 			error = -ENOENT;
1939 			break;
1940 		}
1941 
1942 		xas_store(&xas, new);
1943 		xas_next(&xas);
1944 	}
1945 	if (!error) {
1946 		mem_cgroup_replace_folio(old, new);
1947 		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
1948 		__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
1949 		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
1950 		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
1951 	}
1952 	xa_unlock_irq(&swap_mapping->i_pages);
1953 
1954 	if (unlikely(error)) {
1955 		/*
1956 		 * Is this possible?  I think not, now that our callers
1957 		 * check both the swapcache flag and folio->private
1958 		 * after getting the folio lock; but be defensive.
1959 		 * Reverse old to newpage for clear and free.
1960 		 * Reverse old to new for clear and free.
1961 		old = new;
1962 	} else {
1963 		folio_add_lru(new);
1964 		*foliop = new;
1965 	}
1966 
1967 	folio_clear_swapcache(old);
1968 	old->private = NULL;
1969 
1970 	folio_unlock(old);
1971 	/*
1972 	 * The old folio has been removed from the swap cache: drop its
1973 	 * 'nr_pages' references, as well as the one temporary reference
1974 	 * taken from the swap cache.
1975 	 */
1976 	folio_put_refs(old, nr_pages + 1);
1977 	return error;
1978 }
1979 
1980 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1981 					 struct folio *folio, swp_entry_t swap)
1982 {
1983 	struct address_space *mapping = inode->i_mapping;
1984 	swp_entry_t swapin_error;
1985 	void *old;
1986 	int nr_pages;
1987 
1988 	swapin_error = make_poisoned_swp_entry();
1989 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
1990 			     swp_to_radix_entry(swap),
1991 			     swp_to_radix_entry(swapin_error), 0);
1992 	if (old != swp_to_radix_entry(swap))
1993 		return;
1994 
1995 	nr_pages = folio_nr_pages(folio);
1996 	folio_wait_writeback(folio);
1997 	delete_from_swap_cache(folio);
1998 	/*
1999 	 * Don't treat a swapin-error folio as alloced. Otherwise inode->i_blocks
2000 	 * won't be 0 when the inode is released, which would trigger the
2001 	 * WARN_ON(i_blocks) in shmem_evict_inode().
2002 	 */
2003 	shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2004 	swap_free_nr(swap, nr_pages);
2005 }
2006 
2007 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2008 				   swp_entry_t swap, gfp_t gfp)
2009 {
2010 	struct address_space *mapping = inode->i_mapping;
2011 	XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2012 	void *alloced_shadow = NULL;
2013 	int alloced_order = 0, i;
2014 
2015 	/* Convert user data gfp flags to xarray node gfp flags */
2016 	gfp &= GFP_RECLAIM_MASK;
2017 
2018 	for (;;) {
2019 		int order = -1, split_order = 0;
2020 		void *old = NULL;
2021 
2022 		xas_lock_irq(&xas);
2023 		old = xas_load(&xas);
2024 		if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2025 			xas_set_err(&xas, -EEXIST);
2026 			goto unlock;
2027 		}
2028 
2029 		order = xas_get_order(&xas);
2030 
2031 		/* Swap entry may have changed before we re-acquire the lock */
2032 		if (alloced_order &&
2033 		    (old != alloced_shadow || order != alloced_order)) {
2034 			xas_destroy(&xas);
2035 			alloced_order = 0;
2036 		}
2037 
2038 		/* Try to split large swap entry in pagecache */
2039 		if (order > 0) {
2040 			if (!alloced_order) {
2041 				split_order = order;
2042 				goto unlock;
2043 			}
2044 			xas_split(&xas, old, order);
2045 
2046 			/*
2047 			 * Re-set the swap entries after splitting; this relies on the
2048 			 * swap offsets within the original large entry being contiguous.
2049 			 */
2050 			for (i = 0; i < 1 << order; i++) {
2051 				pgoff_t aligned_index = round_down(index, 1 << order);
2052 				swp_entry_t tmp;
2053 
2054 				tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
2055 				__xa_store(&mapping->i_pages, aligned_index + i,
2056 					   swp_to_radix_entry(tmp), 0);
2057 			}
2058 		}
2059 
2060 unlock:
2061 		xas_unlock_irq(&xas);
2062 
2063 		/* split needed, alloc here and retry. */
2064 		if (split_order) {
2065 			xas_split_alloc(&xas, old, split_order, gfp);
2066 			if (xas_error(&xas))
2067 				goto error;
2068 			alloced_shadow = old;
2069 			alloced_order = split_order;
2070 			xas_reset(&xas);
2071 			continue;
2072 		}
2073 
2074 		if (!xas_nomem(&xas, gfp))
2075 			break;
2076 	}
2077 
2078 error:
2079 	if (xas_error(&xas))
2080 		return xas_error(&xas);
2081 
2082 	return alloced_order;
2083 }
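
/*
 * Illustrative sketch (not part of the original source): once a large swap
 * entry of @split_order has been split, the order-0 entry covering @index is
 * the original entry advanced by index's offset within the old large entry,
 * exactly as recomputed in shmem_swapin_folio() below.  The helper name is
 * hypothetical.
 */
static inline swp_entry_t shmem_entry_after_split_sketch(swp_entry_t swap,
							 pgoff_t index, int split_order)
{
	pgoff_t offset = index - round_down(index, 1UL << split_order);

	return swp_entry(swp_type(swap), swp_offset(swap) + offset);
}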
2084 
2085 /*
2086  * Swap in the folio pointed to by *foliop.
2087  * Caller has to make sure that *foliop contains a valid swapped folio.
2088  * Returns 0 and the folio in *foliop on success. On failure, returns the
2089  * error code and NULL in *foliop.
2090  */
2091 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2092 			     struct folio **foliop, enum sgp_type sgp,
2093 			     gfp_t gfp, struct vm_area_struct *vma,
2094 			     vm_fault_t *fault_type)
2095 {
2096 	struct address_space *mapping = inode->i_mapping;
2097 	struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2098 	struct shmem_inode_info *info = SHMEM_I(inode);
2099 	struct swap_info_struct *si;
2100 	struct folio *folio = NULL;
2101 	swp_entry_t swap;
2102 	int error, nr_pages;
2103 
2104 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2105 	swap = radix_to_swp_entry(*foliop);
2106 	*foliop = NULL;
2107 
2108 	if (is_poisoned_swp_entry(swap))
2109 		return -EIO;
2110 
2111 	si = get_swap_device(swap);
2112 	if (!si) {
2113 		if (!shmem_confirm_swap(mapping, index, swap))
2114 			return -EEXIST;
2115 		else
2116 			return -EINVAL;
2117 	}
2118 
2119 	/* Look it up and read it in.. */
2120 	folio = swap_cache_get_folio(swap, NULL, 0);
2121 	if (!folio) {
2122 		int split_order;
2123 
2124 		/* Or update major stats only when swapin succeeds?? */
2125 		if (fault_type) {
2126 			*fault_type |= VM_FAULT_MAJOR;
2127 			count_vm_event(PGMAJFAULT);
2128 			count_memcg_event_mm(fault_mm, PGMAJFAULT);
2129 		}
2130 
2131 		/*
2132 		 * The swap device can currently only swap in order-0 folios, so
2133 		 * split the large swap entry stored in the pagecache first, if
2134 		 * necessary.
2135 		 */
2136 		split_order = shmem_split_large_entry(inode, index, swap, gfp);
2137 		if (split_order < 0) {
2138 			error = split_order;
2139 			goto failed;
2140 		}
2141 
2142 		/*
2143 		 * If the large swap entry has already been split, it is
2144 		 * necessary to recalculate the new swap entry based on
2145 		 * the old order alignment.
2146 		 */
2147 		if (split_order > 0) {
2148 			pgoff_t offset = index - round_down(index, 1 << split_order);
2149 
2150 			swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2151 		}
2152 
2153 		/* Here we actually start the io */
2154 		folio = shmem_swapin_cluster(swap, gfp, info, index);
2155 		if (!folio) {
2156 			error = -ENOMEM;
2157 			goto failed;
2158 		}
2159 	}
2160 
2161 	/* We have to do this with folio locked to prevent races */
2162 	folio_lock(folio);
2163 	if (!folio_test_swapcache(folio) ||
2164 	    folio->swap.val != swap.val ||
2165 	    !shmem_confirm_swap(mapping, index, swap)) {
2166 		error = -EEXIST;
2167 		goto unlock;
2168 	}
2169 	if (!folio_test_uptodate(folio)) {
2170 		error = -EIO;
2171 		goto failed;
2172 	}
2173 	folio_wait_writeback(folio);
2174 	nr_pages = folio_nr_pages(folio);
2175 
2176 	/*
2177 	 * Some architectures may have to restore extra metadata to the
2178 	 * folio after reading from swap.
2179 	 */
2180 	arch_swap_restore(folio_swap(swap, folio), folio);
2181 
2182 	if (shmem_should_replace_folio(folio, gfp)) {
2183 		error = shmem_replace_folio(&folio, gfp, info, index, vma);
2184 		if (error)
2185 			goto failed;
2186 	}
2187 
2188 	error = shmem_add_to_page_cache(folio, mapping,
2189 					round_down(index, nr_pages),
2190 					swp_to_radix_entry(swap), gfp);
2191 	if (error)
2192 		goto failed;
2193 
2194 	shmem_recalc_inode(inode, 0, -nr_pages);
2195 
2196 	if (sgp == SGP_WRITE)
2197 		folio_mark_accessed(folio);
2198 
2199 	delete_from_swap_cache(folio);
2200 	folio_mark_dirty(folio);
2201 	swap_free_nr(swap, nr_pages);
2202 	put_swap_device(si);
2203 
2204 	*foliop = folio;
2205 	return 0;
2206 failed:
2207 	if (!shmem_confirm_swap(mapping, index, swap))
2208 		error = -EEXIST;
2209 	if (error == -EIO)
2210 		shmem_set_folio_swapin_error(inode, index, folio, swap);
2211 unlock:
2212 	if (folio) {
2213 		folio_unlock(folio);
2214 		folio_put(folio);
2215 	}
2216 	put_swap_device(si);
2217 
2218 	return error;
2219 }
2220 
2221 /*
2222  * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2223  *
2224  * If we allocate a new one we do not mark it dirty. That's up to the
2225  * vm. If we swap it in we mark it dirty, and also free the swap entry,
2226  * since a page cannot live in both the swap and page cache.
2227  *
2228  * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2229  */
2230 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2231 		loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2232 		gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2233 {
2234 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2235 	struct mm_struct *fault_mm;
2236 	struct folio *folio;
2237 	int error;
2238 	bool alloced;
2239 	unsigned long orders = 0;
2240 
2241 	if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2242 		return -EINVAL;
2243 
2244 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2245 		return -EFBIG;
2246 repeat:
2247 	if (sgp <= SGP_CACHE &&
2248 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2249 		return -EINVAL;
2250 
2251 	alloced = false;
2252 	fault_mm = vma ? vma->vm_mm : NULL;
2253 
2254 	folio = filemap_get_entry(inode->i_mapping, index);
2255 	if (folio && vma && userfaultfd_minor(vma)) {
2256 		if (!xa_is_value(folio))
2257 			folio_put(folio);
2258 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2259 		return 0;
2260 	}
2261 
2262 	if (xa_is_value(folio)) {
2263 		error = shmem_swapin_folio(inode, index, &folio,
2264 					   sgp, gfp, vma, fault_type);
2265 		if (error == -EEXIST)
2266 			goto repeat;
2267 
2268 		*foliop = folio;
2269 		return error;
2270 	}
2271 
2272 	if (folio) {
2273 		folio_lock(folio);
2274 
2275 		/* Has the folio been truncated or swapped out? */
2276 		if (unlikely(folio->mapping != inode->i_mapping)) {
2277 			folio_unlock(folio);
2278 			folio_put(folio);
2279 			goto repeat;
2280 		}
2281 		if (sgp == SGP_WRITE)
2282 			folio_mark_accessed(folio);
2283 		if (folio_test_uptodate(folio))
2284 			goto out;
2285 		/* fallocated folio */
2286 		if (sgp != SGP_READ)
2287 			goto clear;
2288 		folio_unlock(folio);
2289 		folio_put(folio);
2290 	}
2291 
2292 	/*
2293 	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2294 	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2295 	 */
2296 	*foliop = NULL;
2297 	if (sgp == SGP_READ)
2298 		return 0;
2299 	if (sgp == SGP_NOALLOC)
2300 		return -ENOENT;
2301 
2302 	/*
2303 	 * Fast cache lookup and swap lookup did not find it: allocate.
2304 	 */
2305 
2306 	if (vma && userfaultfd_missing(vma)) {
2307 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2308 		return 0;
2309 	}
2310 
2311 	/* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2312 	orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2313 	if (orders > 0) {
2314 		gfp_t huge_gfp;
2315 
2316 		huge_gfp = vma_thp_gfp_mask(vma);
2317 		huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2318 		folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2319 				inode, index, fault_mm, orders);
2320 		if (!IS_ERR(folio)) {
2321 			if (folio_test_pmd_mappable(folio))
2322 				count_vm_event(THP_FILE_ALLOC);
2323 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2324 			goto alloced;
2325 		}
2326 		if (PTR_ERR(folio) == -EEXIST)
2327 			goto repeat;
2328 	}
2329 
2330 	folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2331 	if (IS_ERR(folio)) {
2332 		error = PTR_ERR(folio);
2333 		if (error == -EEXIST)
2334 			goto repeat;
2335 		folio = NULL;
2336 		goto unlock;
2337 	}
2338 
2339 alloced:
2340 	alloced = true;
2341 	if (folio_test_large(folio) &&
2342 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2343 					folio_next_index(folio)) {
2344 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2345 		struct shmem_inode_info *info = SHMEM_I(inode);
2346 		/*
2347 		 * Part of the large folio is beyond i_size: subject
2348 		 * to shrink under memory pressure.
2349 		 */
2350 		spin_lock(&sbinfo->shrinklist_lock);
2351 		/*
2352 		 * _careful to defend against unlocked access to
2353 		 * ->shrink_list in shmem_unused_huge_shrink()
2354 		 */
2355 		if (list_empty_careful(&info->shrinklist)) {
2356 			list_add_tail(&info->shrinklist,
2357 				      &sbinfo->shrinklist);
2358 			sbinfo->shrinklist_len++;
2359 		}
2360 		spin_unlock(&sbinfo->shrinklist_lock);
2361 	}
2362 
2363 	if (sgp == SGP_WRITE)
2364 		folio_set_referenced(folio);
2365 	/*
2366 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2367 	 */
2368 	if (sgp == SGP_FALLOC)
2369 		sgp = SGP_WRITE;
2370 clear:
2371 	/*
2372 	 * Let SGP_WRITE caller clear ends if write does not fill folio;
2373 	 * but SGP_FALLOC on a folio fallocated earlier must initialize
2374 	 * it now, lest undo on failure cancel our earlier guarantee.
2375 	 */
2376 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2377 		long i, n = folio_nr_pages(folio);
2378 
2379 		for (i = 0; i < n; i++)
2380 			clear_highpage(folio_page(folio, i));
2381 		flush_dcache_folio(folio);
2382 		folio_mark_uptodate(folio);
2383 	}
2384 
2385 	/* Perhaps the file has been truncated since we checked */
2386 	if (sgp <= SGP_CACHE &&
2387 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2388 		error = -EINVAL;
2389 		goto unlock;
2390 	}
2391 out:
2392 	*foliop = folio;
2393 	return 0;
2394 
2395 	/*
2396 	 * Error recovery.
2397 	 */
2398 unlock:
2399 	if (alloced)
2400 		filemap_remove_folio(folio);
2401 	shmem_recalc_inode(inode, 0, 0);
2402 	if (folio) {
2403 		folio_unlock(folio);
2404 		folio_put(folio);
2405 	}
2406 	return error;
2407 }
2408 
2409 /**
2410  * shmem_get_folio - find, and lock a shmem folio.
2411  * @inode:	inode to search
2412  * @index:	the page index.
2413  * @write_end:	end of a write, could extend inode size
2414  * @foliop:	pointer to the folio if found
2415  * @sgp:	SGP_* flags to control behavior
2416  *
2417  * Looks up the page cache entry at @inode & @index.  If a folio is
2418  * present, it is returned locked with an increased refcount.
2419  *
2420  * If the caller modifies data in the folio, it must call folio_mark_dirty()
2421  * before unlocking the folio to ensure that the folio is not reclaimed.
2422  * There is no need to reserve space before calling folio_mark_dirty().
2423  *
2424  * When no folio is found, the behavior depends on @sgp:
2425  *  - for SGP_READ, *@foliop is %NULL and 0 is returned
2426  *  - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2427  *  - for all other flags a new folio is allocated, inserted into the
2428  *    page cache and returned locked in @foliop.
2429  *
2430  * Context: May sleep.
2431  * Return: 0 if successful, else a negative error code.
2432  */
2433 int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2434 		    struct folio **foliop, enum sgp_type sgp)
2435 {
2436 	return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2437 			mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2438 }
2439 EXPORT_SYMBOL_GPL(shmem_get_folio);
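
/*
 * Illustrative sketch (not part of the original source): how a caller might
 * use shmem_get_folio() as documented above.  On success the folio is
 * returned locked with an elevated refcount; a caller that modifies it must
 * mark it dirty before unlocking, and drop its reference when done.  The
 * function name is hypothetical.
 */
static inline int shmem_zero_index_sketch(struct inode *inode, pgoff_t index)
{
	struct folio *folio;
	int err;

	err = shmem_get_folio(inode, index, 0, &folio, SGP_CACHE);
	if (err)
		return err;

	folio_zero_range(folio,
			 offset_in_folio(folio, (loff_t)index << PAGE_SHIFT),
			 PAGE_SIZE);
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}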
2440 
2441 /*
2442  * This is like autoremove_wake_function, but it removes the wait queue
2443  * entry unconditionally - even if something else had already woken the
2444  * target.
2445  */
2446 static int synchronous_wake_function(wait_queue_entry_t *wait,
2447 			unsigned int mode, int sync, void *key)
2448 {
2449 	int ret = default_wake_function(wait, mode, sync, key);
2450 	list_del_init(&wait->entry);
2451 	return ret;
2452 }
2453 
2454 /*
2455  * Trinity finds that probing a hole which tmpfs is punching can
2456  * prevent the hole-punch from ever completing: which in turn
2457  * locks writers out with its hold on i_rwsem.  So refrain from
2458  * faulting pages into the hole while it's being punched.  Although
2459  * shmem_undo_range() does remove the additions, it may be unable to
2460  * keep up, as each new page needs its own unmap_mapping_range() call,
2461  * and the i_mmap tree grows ever slower to scan if new vmas are added.
2462  *
2463  * It does not matter if we sometimes reach this check just before the
2464  * hole-punch begins, so that one fault then races with the punch:
2465  * we just need to make racing faults a rare case.
2466  *
2467  * The implementation below would be much simpler if we just used a
2468  * standard mutex or completion: but we cannot take i_rwsem in fault,
2469  * and bloating every shmem inode for this unlikely case would be sad.
2470  */
2471 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2472 {
2473 	struct shmem_falloc *shmem_falloc;
2474 	struct file *fpin = NULL;
2475 	vm_fault_t ret = 0;
2476 
2477 	spin_lock(&inode->i_lock);
2478 	shmem_falloc = inode->i_private;
2479 	if (shmem_falloc &&
2480 	    shmem_falloc->waitq &&
2481 	    vmf->pgoff >= shmem_falloc->start &&
2482 	    vmf->pgoff < shmem_falloc->next) {
2483 		wait_queue_head_t *shmem_falloc_waitq;
2484 		DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2485 
2486 		ret = VM_FAULT_NOPAGE;
2487 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2488 		shmem_falloc_waitq = shmem_falloc->waitq;
2489 		prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2490 				TASK_UNINTERRUPTIBLE);
2491 		spin_unlock(&inode->i_lock);
2492 		schedule();
2493 
2494 		/*
2495 		 * shmem_falloc_waitq points into the shmem_fallocate()
2496 		 * stack of the hole-punching task: shmem_falloc_waitq
2497 		 * is usually invalid by the time we reach here, but
2498 		 * finish_wait() does not dereference it in that case;
2499 		 * though i_lock needed lest racing with wake_up_all().
2500 		 */
2501 		spin_lock(&inode->i_lock);
2502 		finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2503 	}
2504 	spin_unlock(&inode->i_lock);
2505 	if (fpin) {
2506 		fput(fpin);
2507 		ret = VM_FAULT_RETRY;
2508 	}
2509 	return ret;
2510 }
2511 
2512 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2513 {
2514 	struct inode *inode = file_inode(vmf->vma->vm_file);
2515 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2516 	struct folio *folio = NULL;
2517 	vm_fault_t ret = 0;
2518 	int err;
2519 
2520 	/*
2521 	 * Trinity finds that probing a hole which tmpfs is punching can
2522 	 * prevent the hole-punch from ever completing: noted in i_private.
2523 	 */
2524 	if (unlikely(inode->i_private)) {
2525 		ret = shmem_falloc_wait(vmf, inode);
2526 		if (ret)
2527 			return ret;
2528 	}
2529 
2530 	WARN_ON_ONCE(vmf->page != NULL);
2531 	err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2532 				  gfp, vmf, &ret);
2533 	if (err)
2534 		return vmf_error(err);
2535 	if (folio) {
2536 		vmf->page = folio_file_page(folio, vmf->pgoff);
2537 		ret |= VM_FAULT_LOCKED;
2538 	}
2539 	return ret;
2540 }
2541 
2542 unsigned long shmem_get_unmapped_area(struct file *file,
2543 				      unsigned long uaddr, unsigned long len,
2544 				      unsigned long pgoff, unsigned long flags)
2545 {
2546 	unsigned long addr;
2547 	unsigned long offset;
2548 	unsigned long inflated_len;
2549 	unsigned long inflated_addr;
2550 	unsigned long inflated_offset;
2551 	unsigned long hpage_size;
2552 
2553 	if (len > TASK_SIZE)
2554 		return -ENOMEM;
2555 
2556 	addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff,
2557 				    flags);
2558 
2559 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2560 		return addr;
2561 	if (IS_ERR_VALUE(addr))
2562 		return addr;
2563 	if (addr & ~PAGE_MASK)
2564 		return addr;
2565 	if (addr > TASK_SIZE - len)
2566 		return addr;
2567 
2568 	if (shmem_huge == SHMEM_HUGE_DENY)
2569 		return addr;
2570 	if (flags & MAP_FIXED)
2571 		return addr;
2572 	/*
2573 	 * Our priority is to support MAP_SHARED mapped hugely;
2574 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2575 	 * But if the caller specified an address hint and we allocated the area
2576 	 * there successfully, respect that as before.
2577 	 */
2578 	if (uaddr == addr)
2579 		return addr;
2580 
2581 	hpage_size = HPAGE_PMD_SIZE;
2582 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2583 		struct super_block *sb;
2584 		unsigned long __maybe_unused hpage_orders;
2585 		int order = 0;
2586 
2587 		if (file) {
2588 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2589 			sb = file_inode(file)->i_sb;
2590 		} else {
2591 			/*
2592 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2593 			 * for "/dev/zero", to create a shared anonymous object.
2594 			 */
2595 			if (IS_ERR(shm_mnt))
2596 				return addr;
2597 			sb = shm_mnt->mnt_sb;
2598 
2599 			/*
2600 			 * Find the highest mTHP order used for anonymous shmem to
2601 			 * provide a suitable alignment address.
2602 			 */
2603 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2604 			hpage_orders = READ_ONCE(huge_shmem_orders_always);
2605 			hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2606 			hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2607 			if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2608 				hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2609 
2610 			if (hpage_orders > 0) {
2611 				order = highest_order(hpage_orders);
2612 				hpage_size = PAGE_SIZE << order;
2613 			}
2614 #endif
2615 		}
2616 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2617 			return addr;
2618 	}
2619 
2620 	if (len < hpage_size)
2621 		return addr;
2622 
2623 	offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2624 	if (offset && offset + len < 2 * hpage_size)
2625 		return addr;
2626 	if ((addr & (hpage_size - 1)) == offset)
2627 		return addr;
2628 
2629 	inflated_len = len + hpage_size - PAGE_SIZE;
2630 	if (inflated_len > TASK_SIZE)
2631 		return addr;
2632 	if (inflated_len < len)
2633 		return addr;
2634 
2635 	inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr,
2636 					     inflated_len, 0, flags);
2637 	if (IS_ERR_VALUE(inflated_addr))
2638 		return addr;
2639 	if (inflated_addr & ~PAGE_MASK)
2640 		return addr;
2641 
2642 	inflated_offset = inflated_addr & (hpage_size - 1);
2643 	inflated_addr += offset - inflated_offset;
2644 	if (inflated_offset > offset)
2645 		inflated_addr += hpage_size;
2646 
2647 	if (inflated_addr > TASK_SIZE - len)
2648 		return addr;
2649 	return inflated_addr;
2650 }
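
/*
 * Illustrative sketch (not part of the original source): the final alignment
 * step of shmem_get_unmapped_area() above, in isolation.  Within the
 * over-sized area at @inflated_addr, pick the lowest address whose offset
 * within @hpage_size matches the file @offset, so that huge pages can be
 * mapped by PMD (or by the chosen mTHP size).  The helper name is
 * hypothetical.
 */
static inline unsigned long shmem_align_hint_sketch(unsigned long inflated_addr,
						    unsigned long offset,
						    unsigned long hpage_size)
{
	unsigned long inflated_offset = inflated_addr & (hpage_size - 1);

	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += hpage_size;
	return inflated_addr;
}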
2651 
2652 #ifdef CONFIG_NUMA
2653 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2654 {
2655 	struct inode *inode = file_inode(vma->vm_file);
2656 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2657 }
2658 
2659 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2660 					  unsigned long addr, pgoff_t *ilx)
2661 {
2662 	struct inode *inode = file_inode(vma->vm_file);
2663 	pgoff_t index;
2664 
2665 	/*
2666 	 * Bias interleave by inode number to distribute better across nodes;
2667 	 * but this interface is independent of which page order is used, so
2668 	 * supplies only that bias, letting caller apply the offset (adjusted
2669 	 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2670 	 */
2671 	*ilx = inode->i_ino;
2672 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2673 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2674 }
2675 
2676 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2677 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2678 {
2679 	struct mempolicy *mpol;
2680 
2681 	/* Bias interleave by inode number to distribute better across nodes */
2682 	*ilx = info->vfs_inode.i_ino + (index >> order);
2683 
2684 	mpol = mpol_shared_policy_lookup(&info->policy, index);
2685 	return mpol ? mpol : get_task_policy(current);
2686 }
2687 #else
2688 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2689 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2690 {
2691 	*ilx = 0;
2692 	return NULL;
2693 }
2694 #endif /* CONFIG_NUMA */
2695 
2696 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2697 {
2698 	struct inode *inode = file_inode(file);
2699 	struct shmem_inode_info *info = SHMEM_I(inode);
2700 	int retval = -ENOMEM;
2701 
2702 	/*
2703 	 * What serializes the accesses to info->flags?
2704 	 * ipc_lock_object() when called from shmctl_do_lock(),
2705 	 * no serialization needed when called from shm_destroy().
2706 	 */
2707 	if (lock && !(info->flags & VM_LOCKED)) {
2708 		if (!user_shm_lock(inode->i_size, ucounts))
2709 			goto out_nomem;
2710 		info->flags |= VM_LOCKED;
2711 		mapping_set_unevictable(file->f_mapping);
2712 	}
2713 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2714 		user_shm_unlock(inode->i_size, ucounts);
2715 		info->flags &= ~VM_LOCKED;
2716 		mapping_clear_unevictable(file->f_mapping);
2717 	}
2718 	retval = 0;
2719 
2720 out_nomem:
2721 	return retval;
2722 }
2723 
2724 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2725 {
2726 	struct inode *inode = file_inode(file);
2727 	struct shmem_inode_info *info = SHMEM_I(inode);
2728 	int ret;
2729 
2730 	ret = seal_check_write(info->seals, vma);
2731 	if (ret)
2732 		return ret;
2733 
2734 	/* arm64 - allow memory tagging on RAM-based files */
2735 	vm_flags_set(vma, VM_MTE_ALLOWED);
2736 
2737 	file_accessed(file);
2738 	/* This is anonymous shared memory if it is unlinked at the time of mmap */
2739 	if (inode->i_nlink)
2740 		vma->vm_ops = &shmem_vm_ops;
2741 	else
2742 		vma->vm_ops = &shmem_anon_vm_ops;
2743 	return 0;
2744 }
2745 
2746 static int shmem_file_open(struct inode *inode, struct file *file)
2747 {
2748 	file->f_mode |= FMODE_CAN_ODIRECT;
2749 	return generic_file_open(inode, file);
2750 }
2751 
2752 #ifdef CONFIG_TMPFS_XATTR
2753 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2754 
2755 /*
2756  * chattr's fsflags are unrelated to extended attributes,
2757  * but tmpfs has chosen to enable them under the same config option.
2758  */
2759 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2760 {
2761 	unsigned int i_flags = 0;
2762 
2763 	if (fsflags & FS_NOATIME_FL)
2764 		i_flags |= S_NOATIME;
2765 	if (fsflags & FS_APPEND_FL)
2766 		i_flags |= S_APPEND;
2767 	if (fsflags & FS_IMMUTABLE_FL)
2768 		i_flags |= S_IMMUTABLE;
2769 	/*
2770 	 * But FS_NODUMP_FL does not require any action in i_flags.
2771 	 */
2772 	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2773 }
2774 #else
2775 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2776 {
2777 }
2778 #define shmem_initxattrs NULL
2779 #endif
2780 
2781 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2782 {
2783 	return &SHMEM_I(inode)->dir_offsets;
2784 }
2785 
2786 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2787 					     struct super_block *sb,
2788 					     struct inode *dir, umode_t mode,
2789 					     dev_t dev, unsigned long flags)
2790 {
2791 	struct inode *inode;
2792 	struct shmem_inode_info *info;
2793 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2794 	ino_t ino;
2795 	int err;
2796 
2797 	err = shmem_reserve_inode(sb, &ino);
2798 	if (err)
2799 		return ERR_PTR(err);
2800 
2801 	inode = new_inode(sb);
2802 	if (!inode) {
2803 		shmem_free_inode(sb, 0);
2804 		return ERR_PTR(-ENOSPC);
2805 	}
2806 
2807 	inode->i_ino = ino;
2808 	inode_init_owner(idmap, inode, dir, mode);
2809 	inode->i_blocks = 0;
2810 	simple_inode_init_ts(inode);
2811 	inode->i_generation = get_random_u32();
2812 	info = SHMEM_I(inode);
2813 	memset(info, 0, (char *)inode - (char *)info);
2814 	spin_lock_init(&info->lock);
2815 	atomic_set(&info->stop_eviction, 0);
2816 	info->seals = F_SEAL_SEAL;
2817 	info->flags = flags & VM_NORESERVE;
2818 	info->i_crtime = inode_get_mtime(inode);
2819 	info->fsflags = (dir == NULL) ? 0 :
2820 		SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2821 	if (info->fsflags)
2822 		shmem_set_inode_flags(inode, info->fsflags);
2823 	INIT_LIST_HEAD(&info->shrinklist);
2824 	INIT_LIST_HEAD(&info->swaplist);
2825 	simple_xattrs_init(&info->xattrs);
2826 	cache_no_acl(inode);
2827 	if (sbinfo->noswap)
2828 		mapping_set_unevictable(inode->i_mapping);
2829 	mapping_set_large_folios(inode->i_mapping);
2830 
2831 	switch (mode & S_IFMT) {
2832 	default:
2833 		inode->i_op = &shmem_special_inode_operations;
2834 		init_special_inode(inode, mode, dev);
2835 		break;
2836 	case S_IFREG:
2837 		inode->i_mapping->a_ops = &shmem_aops;
2838 		inode->i_op = &shmem_inode_operations;
2839 		inode->i_fop = &shmem_file_operations;
2840 		mpol_shared_policy_init(&info->policy,
2841 					 shmem_get_sbmpol(sbinfo));
2842 		break;
2843 	case S_IFDIR:
2844 		inc_nlink(inode);
2845 		/* Some things misbehave if size == 0 on a directory */
2846 		inode->i_size = 2 * BOGO_DIRENT_SIZE;
2847 		inode->i_op = &shmem_dir_inode_operations;
2848 		inode->i_fop = &simple_offset_dir_operations;
2849 		simple_offset_init(shmem_get_offset_ctx(inode));
2850 		break;
2851 	case S_IFLNK:
2852 		/*
2853 		 * Must not load anything in the rbtree,
2854 		 * mpol_free_shared_policy will not be called.
2855 		 */
2856 		mpol_shared_policy_init(&info->policy, NULL);
2857 		break;
2858 	}
2859 
2860 	lockdep_annotate_inode_mutex_key(inode);
2861 	return inode;
2862 }
2863 
2864 #ifdef CONFIG_TMPFS_QUOTA
2865 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2866 				     struct super_block *sb, struct inode *dir,
2867 				     umode_t mode, dev_t dev, unsigned long flags)
2868 {
2869 	int err;
2870 	struct inode *inode;
2871 
2872 	inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2873 	if (IS_ERR(inode))
2874 		return inode;
2875 
2876 	err = dquot_initialize(inode);
2877 	if (err)
2878 		goto errout;
2879 
2880 	err = dquot_alloc_inode(inode);
2881 	if (err) {
2882 		dquot_drop(inode);
2883 		goto errout;
2884 	}
2885 	return inode;
2886 
2887 errout:
2888 	inode->i_flags |= S_NOQUOTA;
2889 	iput(inode);
2890 	return ERR_PTR(err);
2891 }
2892 #else
2893 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2894 				     struct super_block *sb, struct inode *dir,
2895 				     umode_t mode, dev_t dev, unsigned long flags)
2896 {
2897 	return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2898 }
2899 #endif /* CONFIG_TMPFS_QUOTA */
2900 
2901 #ifdef CONFIG_USERFAULTFD
2902 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2903 			   struct vm_area_struct *dst_vma,
2904 			   unsigned long dst_addr,
2905 			   unsigned long src_addr,
2906 			   uffd_flags_t flags,
2907 			   struct folio **foliop)
2908 {
2909 	struct inode *inode = file_inode(dst_vma->vm_file);
2910 	struct shmem_inode_info *info = SHMEM_I(inode);
2911 	struct address_space *mapping = inode->i_mapping;
2912 	gfp_t gfp = mapping_gfp_mask(mapping);
2913 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2914 	void *page_kaddr;
2915 	struct folio *folio;
2916 	int ret;
2917 	pgoff_t max_off;
2918 
2919 	if (shmem_inode_acct_blocks(inode, 1)) {
2920 		/*
2921 		 * We may have got a page, returned -ENOENT triggering a retry,
2922 		 * and now we find ourselves with -ENOMEM. Release the page, to
2923 		 * avoid a BUG_ON in our caller.
2924 		 */
2925 		if (unlikely(*foliop)) {
2926 			folio_put(*foliop);
2927 			*foliop = NULL;
2928 		}
2929 		return -ENOMEM;
2930 	}
2931 
2932 	if (!*foliop) {
2933 		ret = -ENOMEM;
2934 		folio = shmem_alloc_folio(gfp, 0, info, pgoff);
2935 		if (!folio)
2936 			goto out_unacct_blocks;
2937 
2938 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
2939 			page_kaddr = kmap_local_folio(folio, 0);
2940 			/*
2941 			 * The read mmap_lock is held here.  Despite the
2942 			 * mmap_lock being read recursive a deadlock is still
2943 			 * possible if a writer has taken a lock.  For example:
2944 			 *
2945 			 * process A thread 1 takes read lock on own mmap_lock
2946 			 * process A thread 2 calls mmap, blocks taking write lock
2947 			 * process B thread 1 takes page fault, read lock on own mmap lock
2948 			 * process B thread 2 calls mmap, blocks taking write lock
2949 			 * process A thread 1 blocks taking read lock on process B
2950 			 * process B thread 1 blocks taking read lock on process A
2951 			 *
2952 			 * Disable page faults to prevent potential deadlock
2953 			 * and retry the copy outside the mmap_lock.
2954 			 */
2955 			pagefault_disable();
2956 			ret = copy_from_user(page_kaddr,
2957 					     (const void __user *)src_addr,
2958 					     PAGE_SIZE);
2959 			pagefault_enable();
2960 			kunmap_local(page_kaddr);
2961 
2962 			/* fallback to copy_from_user outside mmap_lock */
2963 			if (unlikely(ret)) {
2964 				*foliop = folio;
2965 				ret = -ENOENT;
2966 				/* don't free the page */
2967 				goto out_unacct_blocks;
2968 			}
2969 
2970 			flush_dcache_folio(folio);
2971 		} else {		/* ZEROPAGE */
2972 			clear_user_highpage(&folio->page, dst_addr);
2973 		}
2974 	} else {
2975 		folio = *foliop;
2976 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2977 		*foliop = NULL;
2978 	}
2979 
2980 	VM_BUG_ON(folio_test_locked(folio));
2981 	VM_BUG_ON(folio_test_swapbacked(folio));
2982 	__folio_set_locked(folio);
2983 	__folio_set_swapbacked(folio);
2984 	__folio_mark_uptodate(folio);
2985 
2986 	ret = -EFAULT;
2987 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2988 	if (unlikely(pgoff >= max_off))
2989 		goto out_release;
2990 
2991 	ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
2992 	if (ret)
2993 		goto out_release;
2994 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
2995 	if (ret)
2996 		goto out_release;
2997 
2998 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2999 				       &folio->page, true, flags);
3000 	if (ret)
3001 		goto out_delete_from_cache;
3002 
3003 	shmem_recalc_inode(inode, 1, 0);
3004 	folio_unlock(folio);
3005 	return 0;
3006 out_delete_from_cache:
3007 	filemap_remove_folio(folio);
3008 out_release:
3009 	folio_unlock(folio);
3010 	folio_put(folio);
3011 out_unacct_blocks:
3012 	shmem_inode_unacct_blocks(inode, 1);
3013 	return ret;
3014 }
3015 #endif /* CONFIG_USERFAULTFD */
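
/*
 * Illustrative sketch (not part of the original source): the copy step of
 * shmem_mfill_atomic_pte() above, in isolation.  With the mmap_lock held,
 * page faults are disabled around copy_from_user(); if the copy cannot
 * complete, the caller is told (here by -ENOENT) to retry the copy outside
 * the lock.  The helper name is hypothetical.
 */
static inline int shmem_copy_nofault_sketch(struct folio *folio,
					    unsigned long src_addr)
{
	void *kaddr = kmap_local_folio(folio, 0);
	unsigned long left;

	pagefault_disable();
	left = copy_from_user(kaddr, (const void __user *)src_addr, PAGE_SIZE);
	pagefault_enable();
	kunmap_local(kaddr);

	if (left)
		return -ENOENT;	/* caller must retry without mmap_lock held */

	flush_dcache_folio(folio);
	return 0;
}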
3016 
3017 #ifdef CONFIG_TMPFS
3018 static const struct inode_operations shmem_symlink_inode_operations;
3019 static const struct inode_operations shmem_short_symlink_operations;
3020 
3021 static int
3022 shmem_write_begin(struct file *file, struct address_space *mapping,
3023 			loff_t pos, unsigned len,
3024 			struct folio **foliop, void **fsdata)
3025 {
3026 	struct inode *inode = mapping->host;
3027 	struct shmem_inode_info *info = SHMEM_I(inode);
3028 	pgoff_t index = pos >> PAGE_SHIFT;
3029 	struct folio *folio;
3030 	int ret = 0;
3031 
3032 	/* i_rwsem is held by caller */
3033 	if (unlikely(info->seals & (F_SEAL_GROW |
3034 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3035 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3036 			return -EPERM;
3037 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3038 			return -EPERM;
3039 	}
3040 
3041 	ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3042 	if (ret)
3043 		return ret;
3044 
3045 	if (folio_test_hwpoison(folio) ||
3046 	    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
3047 		folio_unlock(folio);
3048 		folio_put(folio);
3049 		return -EIO;
3050 	}
3051 
3052 	*foliop = folio;
3053 	return 0;
3054 }
3055 
3056 static int
3057 shmem_write_end(struct file *file, struct address_space *mapping,
3058 			loff_t pos, unsigned len, unsigned copied,
3059 			struct folio *folio, void *fsdata)
3060 {
3061 	struct inode *inode = mapping->host;
3062 
3063 	if (pos + copied > inode->i_size)
3064 		i_size_write(inode, pos + copied);
3065 
3066 	if (!folio_test_uptodate(folio)) {
3067 		if (copied < folio_size(folio)) {
3068 			size_t from = offset_in_folio(folio, pos);
3069 			folio_zero_segments(folio, 0, from,
3070 					from + copied, folio_size(folio));
3071 		}
3072 		folio_mark_uptodate(folio);
3073 	}
3074 	folio_mark_dirty(folio);
3075 	folio_unlock(folio);
3076 	folio_put(folio);
3077 
3078 	return copied;
3079 }
3080 
3081 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3082 {
3083 	struct file *file = iocb->ki_filp;
3084 	struct inode *inode = file_inode(file);
3085 	struct address_space *mapping = inode->i_mapping;
3086 	pgoff_t index;
3087 	unsigned long offset;
3088 	int error = 0;
3089 	ssize_t retval = 0;
3090 	loff_t *ppos = &iocb->ki_pos;
3091 
3092 	index = *ppos >> PAGE_SHIFT;
3093 	offset = *ppos & ~PAGE_MASK;
3094 
3095 	for (;;) {
3096 		struct folio *folio = NULL;
3097 		struct page *page = NULL;
3098 		pgoff_t end_index;
3099 		unsigned long nr, ret;
3100 		loff_t i_size = i_size_read(inode);
3101 
3102 		end_index = i_size >> PAGE_SHIFT;
3103 		if (index > end_index)
3104 			break;
3105 		if (index == end_index) {
3106 			nr = i_size & ~PAGE_MASK;
3107 			if (nr <= offset)
3108 				break;
3109 		}
3110 
3111 		error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3112 		if (error) {
3113 			if (error == -EINVAL)
3114 				error = 0;
3115 			break;
3116 		}
3117 		if (folio) {
3118 			folio_unlock(folio);
3119 
3120 			page = folio_file_page(folio, index);
3121 			if (PageHWPoison(page)) {
3122 				folio_put(folio);
3123 				error = -EIO;
3124 				break;
3125 			}
3126 		}
3127 
3128 		/*
3129 		 * We must re-evaluate i_size after getting the folio, since reads
3130 		 * (unlike writes) are called without i_rwsem protection against truncate
3131 		 */
3132 		nr = PAGE_SIZE;
3133 		i_size = i_size_read(inode);
3134 		end_index = i_size >> PAGE_SHIFT;
3135 		if (index == end_index) {
3136 			nr = i_size & ~PAGE_MASK;
3137 			if (nr <= offset) {
3138 				if (folio)
3139 					folio_put(folio);
3140 				break;
3141 			}
3142 		}
3143 		nr -= offset;
3144 
3145 		if (folio) {
3146 			/*
3147 			 * If users can be writing to this page using arbitrary
3148 			 * virtual addresses, take care about potential aliasing
3149 			 * before reading the page on the kernel side.
3150 			 */
3151 			if (mapping_writably_mapped(mapping))
3152 				flush_dcache_page(page);
3153 			/*
3154 			 * Mark the page accessed if we read the beginning.
3155 			 */
3156 			if (!offset)
3157 				folio_mark_accessed(folio);
3158 			/*
3159 			 * Ok, we have the page, and it's up-to-date, so
3160 			 * now we can copy it to user space...
3161 			 */
3162 			ret = copy_page_to_iter(page, offset, nr, to);
3163 			folio_put(folio);
3164 
3165 		} else if (user_backed_iter(to)) {
3166 			/*
3167 			 * Copy to user tends to be so well optimized, but
3168 			 * clear_user() not so much, that it is noticeably
3169 			 * faster to copy the zero page instead of clearing.
3170 			 */
3171 			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3172 		} else {
3173 			/*
3174 			 * But submitting the same page twice in a row to
3175 			 * splice() - or others? - can result in confusion:
3176 			 * so don't attempt that optimization on pipes etc.
3177 			 */
3178 			ret = iov_iter_zero(nr, to);
3179 		}
3180 
3181 		retval += ret;
3182 		offset += ret;
3183 		index += offset >> PAGE_SHIFT;
3184 		offset &= ~PAGE_MASK;
3185 
3186 		if (!iov_iter_count(to))
3187 			break;
3188 		if (ret < nr) {
3189 			error = -EFAULT;
3190 			break;
3191 		}
3192 		cond_resched();
3193 	}
3194 
3195 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
3196 	file_accessed(file);
3197 	return retval ? retval : error;
3198 }
3199 
3200 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3201 {
3202 	struct file *file = iocb->ki_filp;
3203 	struct inode *inode = file->f_mapping->host;
3204 	ssize_t ret;
3205 
3206 	inode_lock(inode);
3207 	ret = generic_write_checks(iocb, from);
3208 	if (ret <= 0)
3209 		goto unlock;
3210 	ret = file_remove_privs(file);
3211 	if (ret)
3212 		goto unlock;
3213 	ret = file_update_time(file);
3214 	if (ret)
3215 		goto unlock;
3216 	ret = generic_perform_write(iocb, from);
3217 unlock:
3218 	inode_unlock(inode);
3219 	return ret;
3220 }
3221 
3222 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3223 			      struct pipe_buffer *buf)
3224 {
3225 	return true;
3226 }
3227 
3228 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3229 				  struct pipe_buffer *buf)
3230 {
3231 }
3232 
3233 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3234 				    struct pipe_buffer *buf)
3235 {
3236 	return false;
3237 }
3238 
3239 static const struct pipe_buf_operations zero_pipe_buf_ops = {
3240 	.release	= zero_pipe_buf_release,
3241 	.try_steal	= zero_pipe_buf_try_steal,
3242 	.get		= zero_pipe_buf_get,
3243 };
3244 
3245 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3246 					loff_t fpos, size_t size)
3247 {
3248 	size_t offset = fpos & ~PAGE_MASK;
3249 
3250 	size = min_t(size_t, size, PAGE_SIZE - offset);
3251 
3252 	if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
3253 		struct pipe_buffer *buf = pipe_head_buf(pipe);
3254 
3255 		*buf = (struct pipe_buffer) {
3256 			.ops	= &zero_pipe_buf_ops,
3257 			.page	= ZERO_PAGE(0),
3258 			.offset	= offset,
3259 			.len	= size,
3260 		};
3261 		pipe->head++;
3262 	}
3263 
3264 	return size;
3265 }
3266 
3267 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3268 				      struct pipe_inode_info *pipe,
3269 				      size_t len, unsigned int flags)
3270 {
3271 	struct inode *inode = file_inode(in);
3272 	struct address_space *mapping = inode->i_mapping;
3273 	struct folio *folio = NULL;
3274 	size_t total_spliced = 0, used, npages, n, part;
3275 	loff_t isize;
3276 	int error = 0;
3277 
3278 	/* Work out how much data we can actually add into the pipe */
3279 	used = pipe_occupancy(pipe->head, pipe->tail);
3280 	npages = max_t(ssize_t, pipe->max_usage - used, 0);
3281 	len = min_t(size_t, len, npages * PAGE_SIZE);
3282 
3283 	do {
3284 		if (*ppos >= i_size_read(inode))
3285 			break;
3286 
3287 		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, 0, &folio,
3288 					SGP_READ);
3289 		if (error) {
3290 			if (error == -EINVAL)
3291 				error = 0;
3292 			break;
3293 		}
3294 		if (folio) {
3295 			folio_unlock(folio);
3296 
3297 			if (folio_test_hwpoison(folio) ||
3298 			    (folio_test_large(folio) &&
3299 			     folio_test_has_hwpoisoned(folio))) {
3300 				error = -EIO;
3301 				break;
3302 			}
3303 		}
3304 
3305 		/*
3306 		 * i_size must be checked after we know the pages are Uptodate.
3307 		 *
3308 		 * Checking i_size after the folio lookup allows us to calculate
3309 		 * the correct value for "part", so that the zero-filled
3310 		 * tail of the page is not copied back to userspace (unless
3311 		 * another truncate extends the file - this is desired though).
3312 		 */
3313 		isize = i_size_read(inode);
3314 		if (unlikely(*ppos >= isize))
3315 			break;
3316 		part = min_t(loff_t, isize - *ppos, len);
3317 
3318 		if (folio) {
3319 			/*
3320 			 * If users can be writing to this page using arbitrary
3321 			 * virtual addresses, take care about potential aliasing
3322 			 * before reading the page on the kernel side.
3323 			 */
3324 			if (mapping_writably_mapped(mapping))
3325 				flush_dcache_folio(folio);
3326 			folio_mark_accessed(folio);
3327 			/*
3328 			 * Ok, we have the page, and it's up-to-date, so we can
3329 			 * now splice it into the pipe.
3330 			 */
3331 			n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3332 			folio_put(folio);
3333 			folio = NULL;
3334 		} else {
3335 			n = splice_zeropage_into_pipe(pipe, *ppos, part);
3336 		}
3337 
3338 		if (!n)
3339 			break;
3340 		len -= n;
3341 		total_spliced += n;
3342 		*ppos += n;
3343 		in->f_ra.prev_pos = *ppos;
3344 		if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3345 			break;
3346 
3347 		cond_resched();
3348 	} while (len);
3349 
3350 	if (folio)
3351 		folio_put(folio);
3352 
3353 	file_accessed(in);
3354 	return total_spliced ? total_spliced : error;
3355 }
3356 
3357 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3358 {
3359 	struct address_space *mapping = file->f_mapping;
3360 	struct inode *inode = mapping->host;
3361 
3362 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
3363 		return generic_file_llseek_size(file, offset, whence,
3364 					MAX_LFS_FILESIZE, i_size_read(inode));
3365 	if (offset < 0)
3366 		return -ENXIO;
3367 
3368 	inode_lock(inode);
3369 	/* We're holding i_rwsem so we can access i_size directly */
3370 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3371 	if (offset >= 0)
3372 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3373 	inode_unlock(inode);
3374 	return offset;
3375 }
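
/*
 * Illustrative sketch (not part of the original source): how userspace might
 * use the SEEK_DATA/SEEK_HOLE support implemented above to find the next
 * hole in a tmpfs file, assuming an already-open file descriptor:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	off_t next_hole(int fd, off_t pos)
 *	{
 *		return lseek(fd, pos, SEEK_HOLE);	// -1 with ENXIO past EOF
 *	}
 */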
3376 
3377 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3378 							 loff_t len)
3379 {
3380 	struct inode *inode = file_inode(file);
3381 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3382 	struct shmem_inode_info *info = SHMEM_I(inode);
3383 	struct shmem_falloc shmem_falloc;
3384 	pgoff_t start, index, end, undo_fallocend;
3385 	int error;
3386 
3387 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3388 		return -EOPNOTSUPP;
3389 
3390 	inode_lock(inode);
3391 
3392 	if (mode & FALLOC_FL_PUNCH_HOLE) {
3393 		struct address_space *mapping = file->f_mapping;
3394 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
3395 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3396 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3397 
3398 		/* protected by i_rwsem */
3399 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3400 			error = -EPERM;
3401 			goto out;
3402 		}
3403 
3404 		shmem_falloc.waitq = &shmem_falloc_waitq;
3405 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3406 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3407 		spin_lock(&inode->i_lock);
3408 		inode->i_private = &shmem_falloc;
3409 		spin_unlock(&inode->i_lock);
3410 
3411 		if ((u64)unmap_end > (u64)unmap_start)
3412 			unmap_mapping_range(mapping, unmap_start,
3413 					    1 + unmap_end - unmap_start, 0);
3414 		shmem_truncate_range(inode, offset, offset + len - 1);
3415 		/* No need to unmap again: hole-punching leaves COWed pages */
3416 
3417 		spin_lock(&inode->i_lock);
3418 		inode->i_private = NULL;
3419 		wake_up_all(&shmem_falloc_waitq);
3420 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3421 		spin_unlock(&inode->i_lock);
3422 		error = 0;
3423 		goto out;
3424 	}
3425 
3426 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3427 	error = inode_newsize_ok(inode, offset + len);
3428 	if (error)
3429 		goto out;
3430 
3431 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3432 		error = -EPERM;
3433 		goto out;
3434 	}
3435 
3436 	start = offset >> PAGE_SHIFT;
3437 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3438 	/* Try to avoid a swapstorm if len is impossible to satisfy */
3439 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3440 		error = -ENOSPC;
3441 		goto out;
3442 	}
3443 
3444 	shmem_falloc.waitq = NULL;
3445 	shmem_falloc.start = start;
3446 	shmem_falloc.next  = start;
3447 	shmem_falloc.nr_falloced = 0;
3448 	shmem_falloc.nr_unswapped = 0;
3449 	spin_lock(&inode->i_lock);
3450 	inode->i_private = &shmem_falloc;
3451 	spin_unlock(&inode->i_lock);
3452 
3453 	/*
3454 	 * info->fallocend is only relevant when huge pages might be
3455 	 * involved: to prevent split_huge_page() freeing fallocated
3456 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3457 	 */
3458 	undo_fallocend = info->fallocend;
3459 	if (info->fallocend < end)
3460 		info->fallocend = end;
3461 
3462 	for (index = start; index < end; ) {
3463 		struct folio *folio;
3464 
3465 		/*
3466 		 * Check for fatal signal so that we abort early in OOM
3467 		 * situations. We don't want to abort in case of non-fatal
3468 		 * signals as large fallocate can take noticeable time and
3469 		 * e.g. periodic timers may result in fallocate constantly
3470 		 * restarting.
3471 		 */
3472 		if (fatal_signal_pending(current))
3473 			error = -EINTR;
3474 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3475 			error = -ENOMEM;
3476 		else
3477 			error = shmem_get_folio(inode, index, offset + len,
3478 						&folio, SGP_FALLOC);
3479 		if (error) {
3480 			info->fallocend = undo_fallocend;
3481 			/* Remove the !uptodate folios we added */
3482 			if (index > start) {
3483 				shmem_undo_range(inode,
3484 				    (loff_t)start << PAGE_SHIFT,
3485 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
3486 			}
3487 			goto undone;
3488 		}
3489 
3490 		/*
3491 		 * Here is a more important optimization than it appears:
3492 		 * a second SGP_FALLOC on the same large folio will clear it,
3493 		 * making it uptodate and un-undoable if we fail later.
3494 		 */
3495 		index = folio_next_index(folio);
3496 		/* Beware 32-bit wraparound */
3497 		if (!index)
3498 			index--;
3499 
3500 		/*
3501 		 * Inform shmem_writepage() how far we have reached.
3502 		 * No need for lock or barrier: we have the page lock.
3503 		 */
3504 		if (!folio_test_uptodate(folio))
3505 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
3506 		shmem_falloc.next = index;
3507 
3508 		/*
3509 		 * If !uptodate, leave it that way so that freeable folios
3510 		 * can be recognized if we need to roll back on error later.
3511 		 * But mark it dirty so that memory pressure will swap rather
3512 		 * than free the folios we are allocating (and SGP_CACHE folios
3513 		 * might still be clean: we now need to mark those dirty too).
3514 		 */
3515 		folio_mark_dirty(folio);
3516 		folio_unlock(folio);
3517 		folio_put(folio);
3518 		cond_resched();
3519 	}
3520 
3521 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3522 		i_size_write(inode, offset + len);
3523 undone:
3524 	spin_lock(&inode->i_lock);
3525 	inode->i_private = NULL;
3526 	spin_unlock(&inode->i_lock);
3527 out:
3528 	if (!error)
3529 		file_modified(file);
3530 	inode_unlock(inode);
3531 	return error;
3532 }
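
/*
 * Illustrative sketch (not part of the original source): the only two
 * fallocate() modes the function above accepts, as used from userspace.
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE (the VFS
 * enforces this before shmem_fallocate() is reached):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int punch_hole(int fd, off_t offset, off_t len)
 *	{
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *				 offset, len);
 *	}
 */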
3533 
3534 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3535 {
3536 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3537 
3538 	buf->f_type = TMPFS_MAGIC;
3539 	buf->f_bsize = PAGE_SIZE;
3540 	buf->f_namelen = NAME_MAX;
3541 	if (sbinfo->max_blocks) {
3542 		buf->f_blocks = sbinfo->max_blocks;
3543 		buf->f_bavail =
3544 		buf->f_bfree  = sbinfo->max_blocks -
3545 				percpu_counter_sum(&sbinfo->used_blocks);
3546 	}
3547 	if (sbinfo->max_inodes) {
3548 		buf->f_files = sbinfo->max_inodes;
3549 		buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3550 	}
3551 	/* else leave those fields 0 like simple_statfs */
3552 
3553 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3554 
3555 	return 0;
3556 }
3557 
3558 /*
3559  * File creation. Allocate an inode, and we're done..
3560  */
3561 static int
3562 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3563 	    struct dentry *dentry, umode_t mode, dev_t dev)
3564 {
3565 	struct inode *inode;
3566 	int error;
3567 
3568 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3569 	if (IS_ERR(inode))
3570 		return PTR_ERR(inode);
3571 
3572 	error = simple_acl_create(dir, inode);
3573 	if (error)
3574 		goto out_iput;
3575 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3576 					     shmem_initxattrs, NULL);
3577 	if (error && error != -EOPNOTSUPP)
3578 		goto out_iput;
3579 
3580 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3581 	if (error)
3582 		goto out_iput;
3583 
3584 	dir->i_size += BOGO_DIRENT_SIZE;
3585 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3586 	inode_inc_iversion(dir);
3587 	d_instantiate(dentry, inode);
3588 	dget(dentry); /* Extra count - pin the dentry in core */
3589 	return error;
3590 
3591 out_iput:
3592 	iput(inode);
3593 	return error;
3594 }
3595 
3596 static int
3597 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3598 	      struct file *file, umode_t mode)
3599 {
3600 	struct inode *inode;
3601 	int error;
3602 
3603 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3604 	if (IS_ERR(inode)) {
3605 		error = PTR_ERR(inode);
3606 		goto err_out;
3607 	}
3608 	error = security_inode_init_security(inode, dir, NULL,
3609 					     shmem_initxattrs, NULL);
3610 	if (error && error != -EOPNOTSUPP)
3611 		goto out_iput;
3612 	error = simple_acl_create(dir, inode);
3613 	if (error)
3614 		goto out_iput;
3615 	d_tmpfile(file, inode);
3616 
3617 err_out:
3618 	return finish_open_simple(file, error);
3619 out_iput:
3620 	iput(inode);
3621 	return error;
3622 }
3623 
3624 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3625 		       struct dentry *dentry, umode_t mode)
3626 {
3627 	int error;
3628 
3629 	error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3630 	if (error)
3631 		return error;
3632 	inc_nlink(dir);
3633 	return 0;
3634 }
3635 
3636 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3637 			struct dentry *dentry, umode_t mode, bool excl)
3638 {
3639 	return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3640 }
3641 
3642 /*
3643  * Link a file.
3644  */
3645 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3646 		      struct dentry *dentry)
3647 {
3648 	struct inode *inode = d_inode(old_dentry);
3649 	int ret = 0;
3650 
3651 	/*
3652 	 * No ordinary (disk based) filesystem counts links as inodes;
3653 	 * but each new link needs a new dentry, pinning lowmem, and
3654 	 * tmpfs dentries cannot be pruned until they are unlinked.
3655 	 * But if an O_TMPFILE file is linked into the tmpfs, the
3656 	 * first link must skip that, to get the accounting right.
3657 	 */
3658 	if (inode->i_nlink) {
3659 		ret = shmem_reserve_inode(inode->i_sb, NULL);
3660 		if (ret)
3661 			goto out;
3662 	}
3663 
3664 	ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3665 	if (ret) {
3666 		if (inode->i_nlink)
3667 			shmem_free_inode(inode->i_sb, 0);
3668 		goto out;
3669 	}
3670 
3671 	dir->i_size += BOGO_DIRENT_SIZE;
3672 	inode_set_mtime_to_ts(dir,
3673 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3674 	inode_inc_iversion(dir);
3675 	inc_nlink(inode);
3676 	ihold(inode);	/* New dentry reference */
3677 	dget(dentry);	/* Extra pinning count for the created dentry */
3678 	d_instantiate(dentry, inode);
3679 out:
3680 	return ret;
3681 }
3682 
3683 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3684 {
3685 	struct inode *inode = d_inode(dentry);
3686 
3687 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3688 		shmem_free_inode(inode->i_sb, 0);
3689 
3690 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3691 
3692 	dir->i_size -= BOGO_DIRENT_SIZE;
3693 	inode_set_mtime_to_ts(dir,
3694 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3695 	inode_inc_iversion(dir);
3696 	drop_nlink(inode);
3697 	dput(dentry);	/* Undo the count from "create" - does all the work */
3698 	return 0;
3699 }
3700 
3701 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3702 {
3703 	if (!simple_offset_empty(dentry))
3704 		return -ENOTEMPTY;
3705 
3706 	drop_nlink(d_inode(dentry));
3707 	drop_nlink(dir);
3708 	return shmem_unlink(dir, dentry);
3709 }
3710 
3711 static int shmem_whiteout(struct mnt_idmap *idmap,
3712 			  struct inode *old_dir, struct dentry *old_dentry)
3713 {
3714 	struct dentry *whiteout;
3715 	int error;
3716 
3717 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3718 	if (!whiteout)
3719 		return -ENOMEM;
3720 
3721 	error = shmem_mknod(idmap, old_dir, whiteout,
3722 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3723 	dput(whiteout);
3724 	if (error)
3725 		return error;
3726 
3727 	/*
3728 	 * Cheat and hash the whiteout while the old dentry is still in
3729 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3730 	 *
3731 	 * d_lookup() will consistently find one of them at this point;
3732 	 * which one it finds does not matter.
3733 	 */
3734 	d_rehash(whiteout);
3735 	return 0;
3736 }
3737 
3738 /*
3739  * The VFS layer already does all the dentry stuff for rename,
3740  * we just have to decrement the usage count for the target if
3741  * it exists so that the VFS layer correctly frees it when it
3742  * gets overwritten.
3743  */
3744 static int shmem_rename2(struct mnt_idmap *idmap,
3745 			 struct inode *old_dir, struct dentry *old_dentry,
3746 			 struct inode *new_dir, struct dentry *new_dentry,
3747 			 unsigned int flags)
3748 {
3749 	struct inode *inode = d_inode(old_dentry);
3750 	int they_are_dirs = S_ISDIR(inode->i_mode);
3751 	int error;
3752 
3753 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3754 		return -EINVAL;
3755 
3756 	if (flags & RENAME_EXCHANGE)
3757 		return simple_offset_rename_exchange(old_dir, old_dentry,
3758 						     new_dir, new_dentry);
3759 
3760 	if (!simple_offset_empty(new_dentry))
3761 		return -ENOTEMPTY;
3762 
3763 	if (flags & RENAME_WHITEOUT) {
3764 		error = shmem_whiteout(idmap, old_dir, old_dentry);
3765 		if (error)
3766 			return error;
3767 	}
3768 
3769 	error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
3770 	if (error)
3771 		return error;
3772 
3773 	if (d_really_is_positive(new_dentry)) {
3774 		(void) shmem_unlink(new_dir, new_dentry);
3775 		if (they_are_dirs) {
3776 			drop_nlink(d_inode(new_dentry));
3777 			drop_nlink(old_dir);
3778 		}
3779 	} else if (they_are_dirs) {
3780 		drop_nlink(old_dir);
3781 		inc_nlink(new_dir);
3782 	}
3783 
3784 	old_dir->i_size -= BOGO_DIRENT_SIZE;
3785 	new_dir->i_size += BOGO_DIRENT_SIZE;
3786 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
3787 	inode_inc_iversion(old_dir);
3788 	inode_inc_iversion(new_dir);
3789 	return 0;
3790 }
3791 
3792 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3793 			 struct dentry *dentry, const char *symname)
3794 {
3795 	int error;
3796 	int len;
3797 	struct inode *inode;
3798 	struct folio *folio;
3799 
3800 	len = strlen(symname) + 1;
3801 	if (len > PAGE_SIZE)
3802 		return -ENAMETOOLONG;
3803 
3804 	inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3805 				VM_NORESERVE);
3806 	if (IS_ERR(inode))
3807 		return PTR_ERR(inode);
3808 
3809 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3810 					     shmem_initxattrs, NULL);
3811 	if (error && error != -EOPNOTSUPP)
3812 		goto out_iput;
3813 
3814 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3815 	if (error)
3816 		goto out_iput;
3817 
3818 	inode->i_size = len-1;
3819 	if (len <= SHORT_SYMLINK_LEN) {
3820 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3821 		if (!inode->i_link) {
3822 			error = -ENOMEM;
3823 			goto out_remove_offset;
3824 		}
3825 		inode->i_op = &shmem_short_symlink_operations;
3826 	} else {
3827 		inode_nohighmem(inode);
3828 		inode->i_mapping->a_ops = &shmem_aops;
3829 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
3830 		if (error)
3831 			goto out_remove_offset;
3832 		inode->i_op = &shmem_symlink_inode_operations;
3833 		memcpy(folio_address(folio), symname, len);
3834 		folio_mark_uptodate(folio);
3835 		folio_mark_dirty(folio);
3836 		folio_unlock(folio);
3837 		folio_put(folio);
3838 	}
3839 	dir->i_size += BOGO_DIRENT_SIZE;
3840 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3841 	inode_inc_iversion(dir);
3842 	d_instantiate(dentry, inode);
3843 	dget(dentry);
3844 	return 0;
3845 
3846 out_remove_offset:
3847 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3848 out_iput:
3849 	iput(inode);
3850 	return error;
3851 }
3852 
3853 static void shmem_put_link(void *arg)
3854 {
3855 	folio_mark_accessed(arg);
3856 	folio_put(arg);
3857 }
3858 
3859 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
3860 				  struct delayed_call *done)
3861 {
3862 	struct folio *folio = NULL;
3863 	int error;
3864 
3865 	if (!dentry) {
3866 		folio = filemap_get_folio(inode->i_mapping, 0);
3867 		if (IS_ERR(folio))
3868 			return ERR_PTR(-ECHILD);
3869 		if (PageHWPoison(folio_page(folio, 0)) ||
3870 		    !folio_test_uptodate(folio)) {
3871 			folio_put(folio);
3872 			return ERR_PTR(-ECHILD);
3873 		}
3874 	} else {
3875 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
3876 		if (error)
3877 			return ERR_PTR(error);
3878 		if (!folio)
3879 			return ERR_PTR(-ECHILD);
3880 		if (PageHWPoison(folio_page(folio, 0))) {
3881 			folio_unlock(folio);
3882 			folio_put(folio);
3883 			return ERR_PTR(-ECHILD);
3884 		}
3885 		folio_unlock(folio);
3886 	}
3887 	set_delayed_call(done, shmem_put_link, folio);
3888 	return folio_address(folio);
3889 }
3890 
3891 #ifdef CONFIG_TMPFS_XATTR
3892 
3893 static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3894 {
3895 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3896 
3897 	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3898 
3899 	return 0;
3900 }
3901 
3902 static int shmem_fileattr_set(struct mnt_idmap *idmap,
3903 			      struct dentry *dentry, struct fileattr *fa)
3904 {
3905 	struct inode *inode = d_inode(dentry);
3906 	struct shmem_inode_info *info = SHMEM_I(inode);
3907 
3908 	if (fileattr_has_fsx(fa))
3909 		return -EOPNOTSUPP;
3910 	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3911 		return -EOPNOTSUPP;
3912 
3913 	info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3914 		(fa->flags & SHMEM_FL_USER_MODIFIABLE);
3915 
3916 	shmem_set_inode_flags(inode, info->fsflags);
3917 	inode_set_ctime_current(inode);
3918 	inode_inc_iversion(inode);
3919 	return 0;
3920 }
3921 
3922 /*
3923  * Superblocks without xattr inode operations may get some security.* xattr
3924  * support from the LSM "for free". As soon as we have any other xattrs
3925  * like ACLs, we also need to implement the security.* handlers at
3926  * filesystem level, though.
3927  */
3928 
3929 /*
3930  * Callback for security_inode_init_security() for acquiring xattrs.
3931  */
3932 static int shmem_initxattrs(struct inode *inode,
3933 			    const struct xattr *xattr_array, void *fs_info)
3934 {
3935 	struct shmem_inode_info *info = SHMEM_I(inode);
3936 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3937 	const struct xattr *xattr;
3938 	struct simple_xattr *new_xattr;
3939 	size_t ispace = 0;
3940 	size_t len;
3941 
3942 	if (sbinfo->max_inodes) {
3943 		for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3944 			ispace += simple_xattr_space(xattr->name,
3945 				xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
3946 		}
3947 		if (ispace) {
3948 			raw_spin_lock(&sbinfo->stat_lock);
3949 			if (sbinfo->free_ispace < ispace)
3950 				ispace = 0;
3951 			else
3952 				sbinfo->free_ispace -= ispace;
3953 			raw_spin_unlock(&sbinfo->stat_lock);
3954 			if (!ispace)
3955 				return -ENOSPC;
3956 		}
3957 	}
3958 
3959 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3960 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3961 		if (!new_xattr)
3962 			break;
3963 
3964 		len = strlen(xattr->name) + 1;
3965 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3966 					  GFP_KERNEL_ACCOUNT);
3967 		if (!new_xattr->name) {
3968 			kvfree(new_xattr);
3969 			break;
3970 		}
3971 
3972 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3973 		       XATTR_SECURITY_PREFIX_LEN);
3974 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3975 		       xattr->name, len);
3976 
3977 		simple_xattr_add(&info->xattrs, new_xattr);
3978 	}
3979 
3980 	if (xattr->name != NULL) {
3981 		if (ispace) {
3982 			raw_spin_lock(&sbinfo->stat_lock);
3983 			sbinfo->free_ispace += ispace;
3984 			raw_spin_unlock(&sbinfo->stat_lock);
3985 		}
3986 		simple_xattrs_free(&info->xattrs, NULL);
3987 		return -ENOMEM;
3988 	}
3989 
3990 	return 0;
3991 }
3992 
3993 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3994 				   struct dentry *unused, struct inode *inode,
3995 				   const char *name, void *buffer, size_t size)
3996 {
3997 	struct shmem_inode_info *info = SHMEM_I(inode);
3998 
3999 	name = xattr_full_name(handler, name);
4000 	return simple_xattr_get(&info->xattrs, name, buffer, size);
4001 }
4002 
4003 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4004 				   struct mnt_idmap *idmap,
4005 				   struct dentry *unused, struct inode *inode,
4006 				   const char *name, const void *value,
4007 				   size_t size, int flags)
4008 {
4009 	struct shmem_inode_info *info = SHMEM_I(inode);
4010 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4011 	struct simple_xattr *old_xattr;
4012 	size_t ispace = 0;
4013 
4014 	name = xattr_full_name(handler, name);
4015 	if (value && sbinfo->max_inodes) {
4016 		ispace = simple_xattr_space(name, size);
4017 		raw_spin_lock(&sbinfo->stat_lock);
4018 		if (sbinfo->free_ispace < ispace)
4019 			ispace = 0;
4020 		else
4021 			sbinfo->free_ispace -= ispace;
4022 		raw_spin_unlock(&sbinfo->stat_lock);
4023 		if (!ispace)
4024 			return -ENOSPC;
4025 	}
4026 
4027 	old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
4028 	if (!IS_ERR(old_xattr)) {
4029 		ispace = 0;
4030 		if (old_xattr && sbinfo->max_inodes)
4031 			ispace = simple_xattr_space(old_xattr->name,
4032 						    old_xattr->size);
4033 		simple_xattr_free(old_xattr);
4034 		old_xattr = NULL;
4035 		inode_set_ctime_current(inode);
4036 		inode_inc_iversion(inode);
4037 	}
4038 	if (ispace) {
4039 		raw_spin_lock(&sbinfo->stat_lock);
4040 		sbinfo->free_ispace += ispace;
4041 		raw_spin_unlock(&sbinfo->stat_lock);
4042 	}
4043 	return PTR_ERR(old_xattr);
4044 }
4045 
4046 static const struct xattr_handler shmem_security_xattr_handler = {
4047 	.prefix = XATTR_SECURITY_PREFIX,
4048 	.get = shmem_xattr_handler_get,
4049 	.set = shmem_xattr_handler_set,
4050 };
4051 
4052 static const struct xattr_handler shmem_trusted_xattr_handler = {
4053 	.prefix = XATTR_TRUSTED_PREFIX,
4054 	.get = shmem_xattr_handler_get,
4055 	.set = shmem_xattr_handler_set,
4056 };
4057 
4058 static const struct xattr_handler shmem_user_xattr_handler = {
4059 	.prefix = XATTR_USER_PREFIX,
4060 	.get = shmem_xattr_handler_get,
4061 	.set = shmem_xattr_handler_set,
4062 };
4063 
4064 static const struct xattr_handler * const shmem_xattr_handlers[] = {
4065 	&shmem_security_xattr_handler,
4066 	&shmem_trusted_xattr_handler,
4067 	&shmem_user_xattr_handler,
4068 	NULL
4069 };
4070 
4071 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4072 {
4073 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4074 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
4075 }
4076 #endif /* CONFIG_TMPFS_XATTR */
4077 
4078 static const struct inode_operations shmem_short_symlink_operations = {
4079 	.getattr	= shmem_getattr,
4080 	.setattr	= shmem_setattr,
4081 	.get_link	= simple_get_link,
4082 #ifdef CONFIG_TMPFS_XATTR
4083 	.listxattr	= shmem_listxattr,
4084 #endif
4085 };
4086 
4087 static const struct inode_operations shmem_symlink_inode_operations = {
4088 	.getattr	= shmem_getattr,
4089 	.setattr	= shmem_setattr,
4090 	.get_link	= shmem_get_link,
4091 #ifdef CONFIG_TMPFS_XATTR
4092 	.listxattr	= shmem_listxattr,
4093 #endif
4094 };
4095 
4096 static struct dentry *shmem_get_parent(struct dentry *child)
4097 {
4098 	return ERR_PTR(-ESTALE);
4099 }
4100 
4101 static int shmem_match(struct inode *ino, void *vfh)
4102 {
4103 	__u32 *fh = vfh;
4104 	__u64 inum = fh[2];
4105 	inum = (inum << 32) | fh[1];
4106 	return ino->i_ino == inum && fh[0] == ino->i_generation;
4107 }
4108 
4109 /* Find any alias of inode, but prefer a hashed alias */
4110 static struct dentry *shmem_find_alias(struct inode *inode)
4111 {
4112 	struct dentry *alias = d_find_alias(inode);
4113 
4114 	return alias ?: d_find_any_alias(inode);
4115 }
4116 
4117 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4118 		struct fid *fid, int fh_len, int fh_type)
4119 {
4120 	struct inode *inode;
4121 	struct dentry *dentry = NULL;
4122 	u64 inum;
4123 
4124 	if (fh_len < 3)
4125 		return NULL;
4126 
4127 	inum = fid->raw[2];
4128 	inum = (inum << 32) | fid->raw[1];
4129 
4130 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4131 			shmem_match, fid->raw);
4132 	if (inode) {
4133 		dentry = shmem_find_alias(inode);
4134 		iput(inode);
4135 	}
4136 
4137 	return dentry;
4138 }
4139 
4140 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4141 				struct inode *parent)
4142 {
4143 	if (*len < 3) {
4144 		*len = 3;
4145 		return FILEID_INVALID;
4146 	}
4147 
4148 	if (inode_unhashed(inode)) {
4149 		/* Unfortunately insert_inode_hash is not idempotent,
4150 		 * so as we hash inodes here rather than at creation
4151 		 * time, we need a lock to ensure we only try
4152 		 * to do it once
4153 		 */
4154 		static DEFINE_SPINLOCK(lock);
4155 		spin_lock(&lock);
4156 		if (inode_unhashed(inode))
4157 			__insert_inode_hash(inode,
4158 					    inode->i_ino + inode->i_generation);
4159 		spin_unlock(&lock);
4160 	}
4161 
4162 	fh[0] = inode->i_generation;
4163 	fh[1] = inode->i_ino;
4164 	fh[2] = ((__u64)inode->i_ino) >> 32;
4165 
4166 	*len = 3;
4167 	return 1;
4168 }
4169 
4170 static const struct export_operations shmem_export_ops = {
4171 	.get_parent     = shmem_get_parent,
4172 	.encode_fh      = shmem_encode_fh,
4173 	.fh_to_dentry	= shmem_fh_to_dentry,
4174 };
4175 
4176 enum shmem_param {
4177 	Opt_gid,
4178 	Opt_huge,
4179 	Opt_mode,
4180 	Opt_mpol,
4181 	Opt_nr_blocks,
4182 	Opt_nr_inodes,
4183 	Opt_size,
4184 	Opt_uid,
4185 	Opt_inode32,
4186 	Opt_inode64,
4187 	Opt_noswap,
4188 	Opt_quota,
4189 	Opt_usrquota,
4190 	Opt_grpquota,
4191 	Opt_usrquota_block_hardlimit,
4192 	Opt_usrquota_inode_hardlimit,
4193 	Opt_grpquota_block_hardlimit,
4194 	Opt_grpquota_inode_hardlimit,
4195 };
4196 
4197 static const struct constant_table shmem_param_enums_huge[] = {
4198 	{"never",	SHMEM_HUGE_NEVER },
4199 	{"always",	SHMEM_HUGE_ALWAYS },
4200 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
4201 	{"advise",	SHMEM_HUGE_ADVISE },
4202 	{}
4203 };
4204 
4205 const struct fs_parameter_spec shmem_fs_parameters[] = {
4206 	fsparam_gid   ("gid",		Opt_gid),
4207 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
4208 	fsparam_u32oct("mode",		Opt_mode),
4209 	fsparam_string("mpol",		Opt_mpol),
4210 	fsparam_string("nr_blocks",	Opt_nr_blocks),
4211 	fsparam_string("nr_inodes",	Opt_nr_inodes),
4212 	fsparam_string("size",		Opt_size),
4213 	fsparam_uid   ("uid",		Opt_uid),
4214 	fsparam_flag  ("inode32",	Opt_inode32),
4215 	fsparam_flag  ("inode64",	Opt_inode64),
4216 	fsparam_flag  ("noswap",	Opt_noswap),
4217 #ifdef CONFIG_TMPFS_QUOTA
4218 	fsparam_flag  ("quota",		Opt_quota),
4219 	fsparam_flag  ("usrquota",	Opt_usrquota),
4220 	fsparam_flag  ("grpquota",	Opt_grpquota),
4221 	fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4222 	fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4223 	fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4224 	fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4225 #endif
4226 	{}
4227 };
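
/*
 * Illustrative example, not from the original source: the parameter table
 * above corresponds to the tmpfs mount options, e.g.
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,huge=within_size \
 *		tmpfs /mnt/tmp
 *
 * where "size" accepts k/m/g suffixes or a percentage of RAM, and "huge"
 * takes one of the values listed in shmem_param_enums_huge[].
 */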
4228 
4229 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4230 {
4231 	struct shmem_options *ctx = fc->fs_private;
4232 	struct fs_parse_result result;
4233 	unsigned long long size;
4234 	char *rest;
4235 	int opt;
4236 	kuid_t kuid;
4237 	kgid_t kgid;
4238 
4239 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4240 	if (opt < 0)
4241 		return opt;
4242 
4243 	switch (opt) {
4244 	case Opt_size:
4245 		size = memparse(param->string, &rest);
4246 		if (*rest == '%') {
4247 			size <<= PAGE_SHIFT;
4248 			size *= totalram_pages();
4249 			do_div(size, 100);
4250 			rest++;
4251 		}
4252 		if (*rest)
4253 			goto bad_value;
4254 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4255 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4256 		break;
4257 	case Opt_nr_blocks:
4258 		ctx->blocks = memparse(param->string, &rest);
4259 		if (*rest || ctx->blocks > LONG_MAX)
4260 			goto bad_value;
4261 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4262 		break;
4263 	case Opt_nr_inodes:
4264 		ctx->inodes = memparse(param->string, &rest);
4265 		if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4266 			goto bad_value;
4267 		ctx->seen |= SHMEM_SEEN_INODES;
4268 		break;
4269 	case Opt_mode:
4270 		ctx->mode = result.uint_32 & 07777;
4271 		break;
4272 	case Opt_uid:
4273 		kuid = result.uid;
4274 
4275 		/*
4276 		 * The requested uid must be representable in the
4277 		 * filesystem's idmapping.
4278 		 */
4279 		if (!kuid_has_mapping(fc->user_ns, kuid))
4280 			goto bad_value;
4281 
4282 		ctx->uid = kuid;
4283 		break;
4284 	case Opt_gid:
4285 		kgid = result.gid;
4286 
4287 		/*
4288 		 * The requested gid must be representable in the
4289 		 * filesystem's idmapping.
4290 		 */
4291 		if (!kgid_has_mapping(fc->user_ns, kgid))
4292 			goto bad_value;
4293 
4294 		ctx->gid = kgid;
4295 		break;
4296 	case Opt_huge:
4297 		ctx->huge = result.uint_32;
4298 		if (ctx->huge != SHMEM_HUGE_NEVER &&
4299 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4300 		      has_transparent_hugepage()))
4301 			goto unsupported_parameter;
4302 		ctx->seen |= SHMEM_SEEN_HUGE;
4303 		break;
4304 	case Opt_mpol:
4305 		if (IS_ENABLED(CONFIG_NUMA)) {
4306 			mpol_put(ctx->mpol);
4307 			ctx->mpol = NULL;
4308 			if (mpol_parse_str(param->string, &ctx->mpol))
4309 				goto bad_value;
4310 			break;
4311 		}
4312 		goto unsupported_parameter;
4313 	case Opt_inode32:
4314 		ctx->full_inums = false;
4315 		ctx->seen |= SHMEM_SEEN_INUMS;
4316 		break;
4317 	case Opt_inode64:
4318 		if (sizeof(ino_t) < 8) {
4319 			return invalfc(fc,
4320 				       "Cannot use inode64 with <64bit inums in kernel\n");
4321 		}
4322 		ctx->full_inums = true;
4323 		ctx->seen |= SHMEM_SEEN_INUMS;
4324 		break;
4325 	case Opt_noswap:
4326 		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4327 			return invalfc(fc,
4328 				       "Turning off swap in unprivileged tmpfs mounts unsupported");
4329 		}
4330 		ctx->noswap = true;
4331 		ctx->seen |= SHMEM_SEEN_NOSWAP;
4332 		break;
4333 	case Opt_quota:
4334 		if (fc->user_ns != &init_user_ns)
4335 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4336 		ctx->seen |= SHMEM_SEEN_QUOTA;
4337 		ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4338 		break;
4339 	case Opt_usrquota:
4340 		if (fc->user_ns != &init_user_ns)
4341 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4342 		ctx->seen |= SHMEM_SEEN_QUOTA;
4343 		ctx->quota_types |= QTYPE_MASK_USR;
4344 		break;
4345 	case Opt_grpquota:
4346 		if (fc->user_ns != &init_user_ns)
4347 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4348 		ctx->seen |= SHMEM_SEEN_QUOTA;
4349 		ctx->quota_types |= QTYPE_MASK_GRP;
4350 		break;
4351 	case Opt_usrquota_block_hardlimit:
4352 		size = memparse(param->string, &rest);
4353 		if (*rest || !size)
4354 			goto bad_value;
4355 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4356 			return invalfc(fc,
4357 				       "User quota block hardlimit too large.");
4358 		ctx->qlimits.usrquota_bhardlimit = size;
4359 		break;
4360 	case Opt_grpquota_block_hardlimit:
4361 		size = memparse(param->string, &rest);
4362 		if (*rest || !size)
4363 			goto bad_value;
4364 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4365 			return invalfc(fc,
4366 				       "Group quota block hardlimit too large.");
4367 		ctx->qlimits.grpquota_bhardlimit = size;
4368 		break;
4369 	case Opt_usrquota_inode_hardlimit:
4370 		size = memparse(param->string, &rest);
4371 		if (*rest || !size)
4372 			goto bad_value;
4373 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4374 			return invalfc(fc,
4375 				       "User quota inode hardlimit too large.");
4376 		ctx->qlimits.usrquota_ihardlimit = size;
4377 		break;
4378 	case Opt_grpquota_inode_hardlimit:
4379 		size = memparse(param->string, &rest);
4380 		if (*rest || !size)
4381 			goto bad_value;
4382 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4383 			return invalfc(fc,
4384 				       "Group quota inode hardlimit too large.");
4385 		ctx->qlimits.grpquota_ihardlimit = size;
4386 		break;
4387 	}
4388 	return 0;
4389 
4390 unsupported_parameter:
4391 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
4392 bad_value:
4393 	return invalfc(fc, "Bad value for '%s'", param->key);
4394 }
4395 
4396 static int shmem_parse_options(struct fs_context *fc, void *data)
4397 {
4398 	char *options = data;
4399 
4400 	if (options) {
4401 		int err = security_sb_eat_lsm_opts(options, &fc->security);
4402 		if (err)
4403 			return err;
4404 	}
4405 
4406 	while (options != NULL) {
4407 		char *this_char = options;
4408 		for (;;) {
4409 			/*
4410 			 * NUL-terminate this option: unfortunately,
4411 			 * mount options form a comma-separated list,
4412 			 * but mpol's nodelist may also contain commas.
4413 			 */
4414 			options = strchr(options, ',');
4415 			if (options == NULL)
4416 				break;
4417 			options++;
4418 			if (!isdigit(*options)) {
4419 				options[-1] = '\0';
4420 				break;
4421 			}
4422 		}
4423 		if (*this_char) {
4424 			char *value = strchr(this_char, '=');
4425 			size_t len = 0;
4426 			int err;
4427 
4428 			if (value) {
4429 				*value++ = '\0';
4430 				len = strlen(value);
4431 			}
4432 			err = vfs_parse_fs_string(fc, this_char, value, len);
4433 			if (err < 0)
4434 				return err;
4435 		}
4436 	}
4437 	return 0;
4438 }
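
/*
 * Illustrative note, not from the original source: the isdigit() check
 * above keeps an mpol nodelist intact, so a monolithic option string like
 *
 *	"size=1g,mpol=bind:0,2,mode=1777"
 *
 * is split into "size=1g", "mpol=bind:0,2" and "mode=1777" - the comma
 * before "2" is not treated as a separator because it is followed by a
 * digit.
 */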
4439 
4440 /*
4441  * Reconfigure a shmem filesystem.
4442  */
4443 static int shmem_reconfigure(struct fs_context *fc)
4444 {
4445 	struct shmem_options *ctx = fc->fs_private;
4446 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4447 	unsigned long used_isp;
4448 	struct mempolicy *mpol = NULL;
4449 	const char *err;
4450 
4451 	raw_spin_lock(&sbinfo->stat_lock);
4452 	used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4453 
4454 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4455 		if (!sbinfo->max_blocks) {
4456 			err = "Cannot retroactively limit size";
4457 			goto out;
4458 		}
4459 		if (percpu_counter_compare(&sbinfo->used_blocks,
4460 					   ctx->blocks) > 0) {
4461 			err = "Too small a size for current use";
4462 			goto out;
4463 		}
4464 	}
4465 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4466 		if (!sbinfo->max_inodes) {
4467 			err = "Cannot retroactively limit inodes";
4468 			goto out;
4469 		}
4470 		if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4471 			err = "Too few inodes for current use";
4472 			goto out;
4473 		}
4474 	}
4475 
4476 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4477 	    sbinfo->next_ino > UINT_MAX) {
4478 		err = "Current inum too high to switch to 32-bit inums";
4479 		goto out;
4480 	}
4481 	if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4482 		err = "Cannot disable swap on remount";
4483 		goto out;
4484 	}
4485 	if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4486 		err = "Cannot enable swap on remount if it was disabled on first mount";
4487 		goto out;
4488 	}
4489 
4490 	if (ctx->seen & SHMEM_SEEN_QUOTA &&
4491 	    !sb_any_quota_loaded(fc->root->d_sb)) {
4492 		err = "Cannot enable quota on remount";
4493 		goto out;
4494 	}
4495 
4496 #ifdef CONFIG_TMPFS_QUOTA
4497 #define CHANGED_LIMIT(name)						\
4498 	(ctx->qlimits.name## hardlimit &&				\
4499 	(ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4500 
4501 	if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4502 	    CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4503 		err = "Cannot change global quota limit on remount";
4504 		goto out;
4505 	}
4506 #endif /* CONFIG_TMPFS_QUOTA */
4507 
4508 	if (ctx->seen & SHMEM_SEEN_HUGE)
4509 		sbinfo->huge = ctx->huge;
4510 	if (ctx->seen & SHMEM_SEEN_INUMS)
4511 		sbinfo->full_inums = ctx->full_inums;
4512 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
4513 		sbinfo->max_blocks  = ctx->blocks;
4514 	if (ctx->seen & SHMEM_SEEN_INODES) {
4515 		sbinfo->max_inodes  = ctx->inodes;
4516 		sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4517 	}
4518 
4519 	/*
4520 	 * Preserve previous mempolicy unless mpol remount option was specified.
4521 	 */
4522 	if (ctx->mpol) {
4523 		mpol = sbinfo->mpol;
4524 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
4525 		ctx->mpol = NULL;
4526 	}
4527 
4528 	if (ctx->noswap)
4529 		sbinfo->noswap = true;
4530 
4531 	raw_spin_unlock(&sbinfo->stat_lock);
4532 	mpol_put(mpol);
4533 	return 0;
4534 out:
4535 	raw_spin_unlock(&sbinfo->stat_lock);
4536 	return invalfc(fc, "%s", err);
4537 }
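
/*
 * Illustrative example, not from the original source: with the checks
 * above, an instance mounted with a size limit can be resized,
 *
 *	mount -o remount,size=2g /mnt/tmp
 *
 * while remounting an unlimited instance with a size, or shrinking below
 * current usage, fails with the corresponding message ("Cannot
 * retroactively limit size", "Too small a size for current use").
 */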
4538 
4539 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4540 {
4541 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4542 	struct mempolicy *mpol;
4543 
4544 	if (sbinfo->max_blocks != shmem_default_max_blocks())
4545 		seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4546 	if (sbinfo->max_inodes != shmem_default_max_inodes())
4547 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4548 	if (sbinfo->mode != (0777 | S_ISVTX))
4549 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4550 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4551 		seq_printf(seq, ",uid=%u",
4552 				from_kuid_munged(&init_user_ns, sbinfo->uid));
4553 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4554 		seq_printf(seq, ",gid=%u",
4555 				from_kgid_munged(&init_user_ns, sbinfo->gid));
4556 
4557 	/*
4558 	 * Showing inode{64,32} might be useful even if it's the system default,
4559 	 * since then people don't have to resort to checking both here and
4560 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
4561 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4562 	 *
4563 	 * We hide it when inode64 isn't the default and we are using 32-bit
4564 	 * inodes, since that probably just means the feature isn't even under
4565 	 * consideration.
4566 	 *
4567 	 * As such:
4568 	 *
4569 	 *                     +-----------------+-----------------+
4570 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
4571 	 *  +------------------+-----------------+-----------------+
4572 	 *  | full_inums=true  | show            | show            |
4573 	 *  | full_inums=false | show            | hide            |
4574 	 *  +------------------+-----------------+-----------------+
4575 	 *
4576 	 */
4577 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4578 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4579 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4580 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4581 	if (sbinfo->huge)
4582 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4583 #endif
4584 	mpol = shmem_get_sbmpol(sbinfo);
4585 	shmem_show_mpol(seq, mpol);
4586 	mpol_put(mpol);
4587 	if (sbinfo->noswap)
4588 		seq_printf(seq, ",noswap");
4589 #ifdef CONFIG_TMPFS_QUOTA
4590 	if (sb_has_quota_active(root->d_sb, USRQUOTA))
4591 		seq_printf(seq, ",usrquota");
4592 	if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4593 		seq_printf(seq, ",grpquota");
4594 	if (sbinfo->qlimits.usrquota_bhardlimit)
4595 		seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4596 			   sbinfo->qlimits.usrquota_bhardlimit);
4597 	if (sbinfo->qlimits.grpquota_bhardlimit)
4598 		seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4599 			   sbinfo->qlimits.grpquota_bhardlimit);
4600 	if (sbinfo->qlimits.usrquota_ihardlimit)
4601 		seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4602 			   sbinfo->qlimits.usrquota_ihardlimit);
4603 	if (sbinfo->qlimits.grpquota_ihardlimit)
4604 		seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4605 			   sbinfo->qlimits.grpquota_ihardlimit);
4606 #endif
4607 	return 0;
4608 }
4609 
4610 #endif /* CONFIG_TMPFS */
4611 
4612 static void shmem_put_super(struct super_block *sb)
4613 {
4614 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4615 
4616 #ifdef CONFIG_TMPFS_QUOTA
4617 	shmem_disable_quotas(sb);
4618 #endif
4619 	free_percpu(sbinfo->ino_batch);
4620 	percpu_counter_destroy(&sbinfo->used_blocks);
4621 	mpol_put(sbinfo->mpol);
4622 	kfree(sbinfo);
4623 	sb->s_fs_info = NULL;
4624 }
4625 
4626 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4627 {
4628 	struct shmem_options *ctx = fc->fs_private;
4629 	struct inode *inode;
4630 	struct shmem_sb_info *sbinfo;
4631 	int error = -ENOMEM;
4632 
4633 	/* Round up to L1_CACHE_BYTES to resist false sharing */
4634 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4635 				L1_CACHE_BYTES), GFP_KERNEL);
4636 	if (!sbinfo)
4637 		return error;
4638 
4639 	sb->s_fs_info = sbinfo;
4640 
4641 #ifdef CONFIG_TMPFS
4642 	/*
4643 	 * By default we only allow half of the physical RAM per
4644 	 * tmpfs instance, limiting inodes to one per page of lowmem;
4645 	 * but the internal instance is left unlimited.
4646 	 */
4647 	if (!(sb->s_flags & SB_KERNMOUNT)) {
4648 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4649 			ctx->blocks = shmem_default_max_blocks();
4650 		if (!(ctx->seen & SHMEM_SEEN_INODES))
4651 			ctx->inodes = shmem_default_max_inodes();
4652 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
4653 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
4654 		sbinfo->noswap = ctx->noswap;
4655 	} else {
4656 		sb->s_flags |= SB_NOUSER;
4657 	}
4658 	sb->s_export_op = &shmem_export_ops;
4659 	sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4660 #else
4661 	sb->s_flags |= SB_NOUSER;
4662 #endif
4663 	sbinfo->max_blocks = ctx->blocks;
4664 	sbinfo->max_inodes = ctx->inodes;
4665 	sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
4666 	if (sb->s_flags & SB_KERNMOUNT) {
4667 		sbinfo->ino_batch = alloc_percpu(ino_t);
4668 		if (!sbinfo->ino_batch)
4669 			goto failed;
4670 	}
4671 	sbinfo->uid = ctx->uid;
4672 	sbinfo->gid = ctx->gid;
4673 	sbinfo->full_inums = ctx->full_inums;
4674 	sbinfo->mode = ctx->mode;
4675 	sbinfo->huge = ctx->huge;
4676 	sbinfo->mpol = ctx->mpol;
4677 	ctx->mpol = NULL;
4678 
4679 	raw_spin_lock_init(&sbinfo->stat_lock);
4680 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4681 		goto failed;
4682 	spin_lock_init(&sbinfo->shrinklist_lock);
4683 	INIT_LIST_HEAD(&sbinfo->shrinklist);
4684 
4685 	sb->s_maxbytes = MAX_LFS_FILESIZE;
4686 	sb->s_blocksize = PAGE_SIZE;
4687 	sb->s_blocksize_bits = PAGE_SHIFT;
4688 	sb->s_magic = TMPFS_MAGIC;
4689 	sb->s_op = &shmem_ops;
4690 	sb->s_time_gran = 1;
4691 #ifdef CONFIG_TMPFS_XATTR
4692 	sb->s_xattr = shmem_xattr_handlers;
4693 #endif
4694 #ifdef CONFIG_TMPFS_POSIX_ACL
4695 	sb->s_flags |= SB_POSIXACL;
4696 #endif
4697 	uuid_t uuid;
4698 	uuid_gen(&uuid);
4699 	super_set_uuid(sb, uuid.b, sizeof(uuid));
4700 
4701 #ifdef CONFIG_TMPFS_QUOTA
4702 	if (ctx->seen & SHMEM_SEEN_QUOTA) {
4703 		sb->dq_op = &shmem_quota_operations;
4704 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
4705 		sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4706 
4707 		/* Copy the default limits from ctx into sbinfo */
4708 		memcpy(&sbinfo->qlimits, &ctx->qlimits,
4709 		       sizeof(struct shmem_quota_limits));
4710 
4711 		if (shmem_enable_quotas(sb, ctx->quota_types))
4712 			goto failed;
4713 	}
4714 #endif /* CONFIG_TMPFS_QUOTA */
4715 
4716 	inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
4717 				S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
4718 	if (IS_ERR(inode)) {
4719 		error = PTR_ERR(inode);
4720 		goto failed;
4721 	}
4722 	inode->i_uid = sbinfo->uid;
4723 	inode->i_gid = sbinfo->gid;
4724 	sb->s_root = d_make_root(inode);
4725 	if (!sb->s_root)
4726 		goto failed;
4727 	return 0;
4728 
4729 failed:
4730 	shmem_put_super(sb);
4731 	return error;
4732 }
4733 
4734 static int shmem_get_tree(struct fs_context *fc)
4735 {
4736 	return get_tree_nodev(fc, shmem_fill_super);
4737 }
4738 
4739 static void shmem_free_fc(struct fs_context *fc)
4740 {
4741 	struct shmem_options *ctx = fc->fs_private;
4742 
4743 	if (ctx) {
4744 		mpol_put(ctx->mpol);
4745 		kfree(ctx);
4746 	}
4747 }
4748 
4749 static const struct fs_context_operations shmem_fs_context_ops = {
4750 	.free			= shmem_free_fc,
4751 	.get_tree		= shmem_get_tree,
4752 #ifdef CONFIG_TMPFS
4753 	.parse_monolithic	= shmem_parse_options,
4754 	.parse_param		= shmem_parse_one,
4755 	.reconfigure		= shmem_reconfigure,
4756 #endif
4757 };
4758 
4759 static struct kmem_cache *shmem_inode_cachep __ro_after_init;
4760 
4761 static struct inode *shmem_alloc_inode(struct super_block *sb)
4762 {
4763 	struct shmem_inode_info *info;
4764 	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
4765 	if (!info)
4766 		return NULL;
4767 	return &info->vfs_inode;
4768 }
4769 
4770 static void shmem_free_in_core_inode(struct inode *inode)
4771 {
4772 	if (S_ISLNK(inode->i_mode))
4773 		kfree(inode->i_link);
4774 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4775 }
4776 
4777 static void shmem_destroy_inode(struct inode *inode)
4778 {
4779 	if (S_ISREG(inode->i_mode))
4780 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4781 	if (S_ISDIR(inode->i_mode))
4782 		simple_offset_destroy(shmem_get_offset_ctx(inode));
4783 }
4784 
4785 static void shmem_init_inode(void *foo)
4786 {
4787 	struct shmem_inode_info *info = foo;
4788 	inode_init_once(&info->vfs_inode);
4789 }
4790 
4791 static void __init shmem_init_inodecache(void)
4792 {
4793 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
4794 				sizeof(struct shmem_inode_info),
4795 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
4796 }
4797 
4798 static void __init shmem_destroy_inodecache(void)
4799 {
4800 	kmem_cache_destroy(shmem_inode_cachep);
4801 }
4802 
4803 /* Keep the page in page cache instead of truncating it */
4804 static int shmem_error_remove_folio(struct address_space *mapping,
4805 				   struct folio *folio)
4806 {
4807 	return 0;
4808 }
4809 
4810 static const struct address_space_operations shmem_aops = {
4811 	.writepage	= shmem_writepage,
4812 	.dirty_folio	= noop_dirty_folio,
4813 #ifdef CONFIG_TMPFS
4814 	.write_begin	= shmem_write_begin,
4815 	.write_end	= shmem_write_end,
4816 #endif
4817 #ifdef CONFIG_MIGRATION
4818 	.migrate_folio	= migrate_folio,
4819 #endif
4820 	.error_remove_folio = shmem_error_remove_folio,
4821 };
4822 
4823 static const struct file_operations shmem_file_operations = {
4824 	.mmap		= shmem_mmap,
4825 	.open		= shmem_file_open,
4826 	.get_unmapped_area = shmem_get_unmapped_area,
4827 #ifdef CONFIG_TMPFS
4828 	.llseek		= shmem_file_llseek,
4829 	.read_iter	= shmem_file_read_iter,
4830 	.write_iter	= shmem_file_write_iter,
4831 	.fsync		= noop_fsync,
4832 	.splice_read	= shmem_file_splice_read,
4833 	.splice_write	= iter_file_splice_write,
4834 	.fallocate	= shmem_fallocate,
4835 #endif
4836 };
4837 
4838 static const struct inode_operations shmem_inode_operations = {
4839 	.getattr	= shmem_getattr,
4840 	.setattr	= shmem_setattr,
4841 #ifdef CONFIG_TMPFS_XATTR
4842 	.listxattr	= shmem_listxattr,
4843 	.set_acl	= simple_set_acl,
4844 	.fileattr_get	= shmem_fileattr_get,
4845 	.fileattr_set	= shmem_fileattr_set,
4846 #endif
4847 };
4848 
4849 static const struct inode_operations shmem_dir_inode_operations = {
4850 #ifdef CONFIG_TMPFS
4851 	.getattr	= shmem_getattr,
4852 	.create		= shmem_create,
4853 	.lookup		= simple_lookup,
4854 	.link		= shmem_link,
4855 	.unlink		= shmem_unlink,
4856 	.symlink	= shmem_symlink,
4857 	.mkdir		= shmem_mkdir,
4858 	.rmdir		= shmem_rmdir,
4859 	.mknod		= shmem_mknod,
4860 	.rename		= shmem_rename2,
4861 	.tmpfile	= shmem_tmpfile,
4862 	.get_offset_ctx	= shmem_get_offset_ctx,
4863 #endif
4864 #ifdef CONFIG_TMPFS_XATTR
4865 	.listxattr	= shmem_listxattr,
4866 	.fileattr_get	= shmem_fileattr_get,
4867 	.fileattr_set	= shmem_fileattr_set,
4868 #endif
4869 #ifdef CONFIG_TMPFS_POSIX_ACL
4870 	.setattr	= shmem_setattr,
4871 	.set_acl	= simple_set_acl,
4872 #endif
4873 };
4874 
4875 static const struct inode_operations shmem_special_inode_operations = {
4876 	.getattr	= shmem_getattr,
4877 #ifdef CONFIG_TMPFS_XATTR
4878 	.listxattr	= shmem_listxattr,
4879 #endif
4880 #ifdef CONFIG_TMPFS_POSIX_ACL
4881 	.setattr	= shmem_setattr,
4882 	.set_acl	= simple_set_acl,
4883 #endif
4884 };
4885 
4886 static const struct super_operations shmem_ops = {
4887 	.alloc_inode	= shmem_alloc_inode,
4888 	.free_inode	= shmem_free_in_core_inode,
4889 	.destroy_inode	= shmem_destroy_inode,
4890 #ifdef CONFIG_TMPFS
4891 	.statfs		= shmem_statfs,
4892 	.show_options	= shmem_show_options,
4893 #endif
4894 #ifdef CONFIG_TMPFS_QUOTA
4895 	.get_dquots	= shmem_get_dquots,
4896 #endif
4897 	.evict_inode	= shmem_evict_inode,
4898 	.drop_inode	= generic_delete_inode,
4899 	.put_super	= shmem_put_super,
4900 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4901 	.nr_cached_objects	= shmem_unused_huge_count,
4902 	.free_cached_objects	= shmem_unused_huge_scan,
4903 #endif
4904 };
4905 
4906 static const struct vm_operations_struct shmem_vm_ops = {
4907 	.fault		= shmem_fault,
4908 	.map_pages	= filemap_map_pages,
4909 #ifdef CONFIG_NUMA
4910 	.set_policy     = shmem_set_policy,
4911 	.get_policy     = shmem_get_policy,
4912 #endif
4913 };
4914 
4915 static const struct vm_operations_struct shmem_anon_vm_ops = {
4916 	.fault		= shmem_fault,
4917 	.map_pages	= filemap_map_pages,
4918 #ifdef CONFIG_NUMA
4919 	.set_policy     = shmem_set_policy,
4920 	.get_policy     = shmem_get_policy,
4921 #endif
4922 };
4923 
4924 int shmem_init_fs_context(struct fs_context *fc)
4925 {
4926 	struct shmem_options *ctx;
4927 
4928 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4929 	if (!ctx)
4930 		return -ENOMEM;
4931 
4932 	ctx->mode = 0777 | S_ISVTX;
4933 	ctx->uid = current_fsuid();
4934 	ctx->gid = current_fsgid();
4935 
4936 	fc->fs_private = ctx;
4937 	fc->ops = &shmem_fs_context_ops;
4938 	return 0;
4939 }
4940 
4941 static struct file_system_type shmem_fs_type = {
4942 	.owner		= THIS_MODULE,
4943 	.name		= "tmpfs",
4944 	.init_fs_context = shmem_init_fs_context,
4945 #ifdef CONFIG_TMPFS
4946 	.parameters	= shmem_fs_parameters,
4947 #endif
4948 	.kill_sb	= kill_litter_super,
4949 	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
4950 };
4951 
4952 void __init shmem_init(void)
4953 {
4954 	int error;
4955 
4956 	shmem_init_inodecache();
4957 
4958 #ifdef CONFIG_TMPFS_QUOTA
4959 	register_quota_format(&shmem_quota_format);
4960 #endif
4961 
4962 	error = register_filesystem(&shmem_fs_type);
4963 	if (error) {
4964 		pr_err("Could not register tmpfs\n");
4965 		goto out2;
4966 	}
4967 
4968 	shm_mnt = kern_mount(&shmem_fs_type);
4969 	if (IS_ERR(shm_mnt)) {
4970 		error = PTR_ERR(shm_mnt);
4971 		pr_err("Could not kern_mount tmpfs\n");
4972 		goto out1;
4973 	}
4974 
4975 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4976 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4977 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4978 	else
4979 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
4980 
4981 	/*
4982 	 * Default to setting PMD-sized THP to inherit the global setting and
4983 	 * disable all other multi-size THPs.
4984 	 */
4985 	huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
4986 #endif
4987 	return;
4988 
4989 out1:
4990 	unregister_filesystem(&shmem_fs_type);
4991 out2:
4992 #ifdef CONFIG_TMPFS_QUOTA
4993 	unregister_quota_format(&shmem_quota_format);
4994 #endif
4995 	shmem_destroy_inodecache();
4996 	shm_mnt = ERR_PTR(error);
4997 }
4998 
4999 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5000 static ssize_t shmem_enabled_show(struct kobject *kobj,
5001 				  struct kobj_attribute *attr, char *buf)
5002 {
5003 	static const int values[] = {
5004 		SHMEM_HUGE_ALWAYS,
5005 		SHMEM_HUGE_WITHIN_SIZE,
5006 		SHMEM_HUGE_ADVISE,
5007 		SHMEM_HUGE_NEVER,
5008 		SHMEM_HUGE_DENY,
5009 		SHMEM_HUGE_FORCE,
5010 	};
5011 	int len = 0;
5012 	int i;
5013 
5014 	for (i = 0; i < ARRAY_SIZE(values); i++) {
5015 		len += sysfs_emit_at(buf, len,
5016 				shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5017 				i ? " " : "", shmem_format_huge(values[i]));
5018 	}
5019 	len += sysfs_emit_at(buf, len, "\n");
5020 
5021 	return len;
5022 }
5023 
5024 static ssize_t shmem_enabled_store(struct kobject *kobj,
5025 		struct kobj_attribute *attr, const char *buf, size_t count)
5026 {
5027 	char tmp[16];
5028 	int huge;
5029 
5030 	if (count + 1 > sizeof(tmp))
5031 		return -EINVAL;
5032 	memcpy(tmp, buf, count);
5033 	tmp[count] = '\0';
5034 	if (count && tmp[count - 1] == '\n')
5035 		tmp[count - 1] = '\0';
5036 
5037 	huge = shmem_parse_huge(tmp);
5038 	if (huge == -EINVAL)
5039 		return -EINVAL;
5040 	if (!has_transparent_hugepage() &&
5041 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
5042 		return -EINVAL;
5043 
5044 	/* Do not override huge allocation policy with non-PMD sized mTHP */
5045 	if (huge == SHMEM_HUGE_FORCE &&
5046 	    huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
5047 		return -EINVAL;
5048 
5049 	shmem_huge = huge;
5050 	if (shmem_huge > SHMEM_HUGE_DENY)
5051 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5052 	return count;
5053 }
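
/*
 * Illustrative example, not from the original source: this handler backs
 * the shmem_enabled file under /sys/kernel/mm/transparent_hugepage/, so
 * the shmem huge policy can be changed at runtime, e.g.
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * accepting the values listed in shmem_enabled_show(): always,
 * within_size, advise, never, deny and force.
 */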
5054 
5055 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
5056 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5057 
5058 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5059 					  struct kobj_attribute *attr, char *buf)
5060 {
5061 	int order = to_thpsize(kobj)->order;
5062 	const char *output;
5063 
5064 	if (test_bit(order, &huge_shmem_orders_always))
5065 		output = "[always] inherit within_size advise never";
5066 	else if (test_bit(order, &huge_shmem_orders_inherit))
5067 		output = "always [inherit] within_size advise never";
5068 	else if (test_bit(order, &huge_shmem_orders_within_size))
5069 		output = "always inherit [within_size] advise never";
5070 	else if (test_bit(order, &huge_shmem_orders_madvise))
5071 		output = "always inherit within_size [advise] never";
5072 	else
5073 		output = "always inherit within_size advise [never]";
5074 
5075 	return sysfs_emit(buf, "%s\n", output);
5076 }
5077 
5078 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5079 					   struct kobj_attribute *attr,
5080 					   const char *buf, size_t count)
5081 {
5082 	int order = to_thpsize(kobj)->order;
5083 	ssize_t ret = count;
5084 
5085 	if (sysfs_streq(buf, "always")) {
5086 		spin_lock(&huge_shmem_orders_lock);
5087 		clear_bit(order, &huge_shmem_orders_inherit);
5088 		clear_bit(order, &huge_shmem_orders_madvise);
5089 		clear_bit(order, &huge_shmem_orders_within_size);
5090 		set_bit(order, &huge_shmem_orders_always);
5091 		spin_unlock(&huge_shmem_orders_lock);
5092 	} else if (sysfs_streq(buf, "inherit")) {
5093 		/* Do not override huge allocation policy with non-PMD sized mTHP */
5094 		if (shmem_huge == SHMEM_HUGE_FORCE &&
5095 		    order != HPAGE_PMD_ORDER)
5096 			return -EINVAL;
5097 
5098 		spin_lock(&huge_shmem_orders_lock);
5099 		clear_bit(order, &huge_shmem_orders_always);
5100 		clear_bit(order, &huge_shmem_orders_madvise);
5101 		clear_bit(order, &huge_shmem_orders_within_size);
5102 		set_bit(order, &huge_shmem_orders_inherit);
5103 		spin_unlock(&huge_shmem_orders_lock);
5104 	} else if (sysfs_streq(buf, "within_size")) {
5105 		spin_lock(&huge_shmem_orders_lock);
5106 		clear_bit(order, &huge_shmem_orders_always);
5107 		clear_bit(order, &huge_shmem_orders_inherit);
5108 		clear_bit(order, &huge_shmem_orders_madvise);
5109 		set_bit(order, &huge_shmem_orders_within_size);
5110 		spin_unlock(&huge_shmem_orders_lock);
5111 	} else if (sysfs_streq(buf, "advise")) {
5112 		spin_lock(&huge_shmem_orders_lock);
5113 		clear_bit(order, &huge_shmem_orders_always);
5114 		clear_bit(order, &huge_shmem_orders_inherit);
5115 		clear_bit(order, &huge_shmem_orders_within_size);
5116 		set_bit(order, &huge_shmem_orders_madvise);
5117 		spin_unlock(&huge_shmem_orders_lock);
5118 	} else if (sysfs_streq(buf, "never")) {
5119 		spin_lock(&huge_shmem_orders_lock);
5120 		clear_bit(order, &huge_shmem_orders_always);
5121 		clear_bit(order, &huge_shmem_orders_inherit);
5122 		clear_bit(order, &huge_shmem_orders_within_size);
5123 		clear_bit(order, &huge_shmem_orders_madvise);
5124 		spin_unlock(&huge_shmem_orders_lock);
5125 	} else {
5126 		ret = -EINVAL;
5127 	}
5128 
5129 	return ret;
5130 }
5131 
5132 struct kobj_attribute thpsize_shmem_enabled_attr =
5133 	__ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
5134 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
5135 
5136 #else /* !CONFIG_SHMEM */
5137 
5138 /*
5139  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5140  *
5141  * This is intended for small systems where the benefits of the full
5142  * shmem code (swap-backed and resource-limited) are outweighed by
5143  * its complexity. On systems without swap this code should be
5144  * effectively equivalent, but much lighter weight.
5145  */
5146 
5147 static struct file_system_type shmem_fs_type = {
5148 	.name		= "tmpfs",
5149 	.init_fs_context = ramfs_init_fs_context,
5150 	.parameters	= ramfs_fs_parameters,
5151 	.kill_sb	= ramfs_kill_sb,
5152 	.fs_flags	= FS_USERNS_MOUNT,
5153 };
5154 
5155 void __init shmem_init(void)
5156 {
5157 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
5158 
5159 	shm_mnt = kern_mount(&shmem_fs_type);
5160 	BUG_ON(IS_ERR(shm_mnt));
5161 }
5162 
5163 int shmem_unuse(unsigned int type)
5164 {
5165 	return 0;
5166 }
5167 
5168 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
5169 {
5170 	return 0;
5171 }
5172 
5173 void shmem_unlock_mapping(struct address_space *mapping)
5174 {
5175 }
5176 
5177 #ifdef CONFIG_MMU
5178 unsigned long shmem_get_unmapped_area(struct file *file,
5179 				      unsigned long addr, unsigned long len,
5180 				      unsigned long pgoff, unsigned long flags)
5181 {
5182 	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
5183 }
5184 #endif
5185 
5186 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5187 {
5188 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5189 }
5190 EXPORT_SYMBOL_GPL(shmem_truncate_range);
5191 
5192 #define shmem_vm_ops				generic_file_vm_ops
5193 #define shmem_anon_vm_ops			generic_file_vm_ops
5194 #define shmem_file_operations			ramfs_file_operations
5195 #define shmem_acct_size(flags, size)		0
5196 #define shmem_unacct_size(flags, size)		do {} while (0)
5197 
5198 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
5199 				struct super_block *sb, struct inode *dir,
5200 				umode_t mode, dev_t dev, unsigned long flags)
5201 {
5202 	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
5203 	return inode ? inode : ERR_PTR(-ENOSPC);
5204 }
5205 
5206 #endif /* CONFIG_SHMEM */
5207 
5208 /* common code */
5209 
5210 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
5211 			loff_t size, unsigned long flags, unsigned int i_flags)
5212 {
5213 	struct inode *inode;
5214 	struct file *res;
5215 
5216 	if (IS_ERR(mnt))
5217 		return ERR_CAST(mnt);
5218 
5219 	if (size < 0 || size > MAX_LFS_FILESIZE)
5220 		return ERR_PTR(-EINVAL);
5221 
5222 	if (shmem_acct_size(flags, size))
5223 		return ERR_PTR(-ENOMEM);
5224 
5225 	if (is_idmapped_mnt(mnt))
5226 		return ERR_PTR(-EINVAL);
5227 
5228 	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
5229 				S_IFREG | S_IRWXUGO, 0, flags);
5230 	if (IS_ERR(inode)) {
5231 		shmem_unacct_size(flags, size);
5232 		return ERR_CAST(inode);
5233 	}
5234 	inode->i_flags |= i_flags;
5235 	inode->i_size = size;
5236 	clear_nlink(inode);	/* It is unlinked */
5237 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
5238 	if (!IS_ERR(res))
5239 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
5240 				&shmem_file_operations);
5241 	if (IS_ERR(res))
5242 		iput(inode);
5243 	return res;
5244 }
5245 
5246 /**
5247  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5248  * 	kernel internal.  There will be NO LSM permission checks against the
5249  * 	underlying inode.  So users of this interface must do LSM checks at a
5250  *	higher layer.  The users are the big_key and shm implementations.  LSM
5251  *	checks are provided at the key or shm level rather than the inode.
5252  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5253  * @size: size to be set for the file
5254  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5255  */
5256 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
5257 {
5258 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
5259 }
5260 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
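
/*
 * Illustrative usage sketch, not from the original source: a
 * kernel-internal caller such as big_key would do something like
 *
 *	file = shmem_kernel_file_setup("big_key", datalen, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * and must perform its own access checks, since the S_PRIVATE flag set
 * here suppresses LSM checks on the backing inode.
 */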
5261 
5262 /**
5263  * shmem_file_setup - get an unlinked file living in tmpfs
5264  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5265  * @size: size to be set for the file
5266  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5267  */
5268 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
5269 {
5270 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
5271 }
5272 EXPORT_SYMBOL_GPL(shmem_file_setup);
5273 
5274 /**
5275  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5276  * @mnt: the tmpfs mount where the file will be created
5277  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5278  * @size: size to be set for the file
5279  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5280  */
5281 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
5282 				       loff_t size, unsigned long flags)
5283 {
5284 	return __shmem_file_setup(mnt, name, size, flags, 0);
5285 }
5286 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
5287 
5288 /**
5289  * shmem_zero_setup - setup a shared anonymous mapping
5290  * @vma: the vma to be mmapped is prepared by do_mmap
5291  */
5292 int shmem_zero_setup(struct vm_area_struct *vma)
5293 {
5294 	struct file *file;
5295 	loff_t size = vma->vm_end - vma->vm_start;
5296 
5297 	/*
5298 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
5299 	 * between XFS directory reading and selinux: since this file is only
5300 	 * accessible to the user through its mapping, use S_PRIVATE flag to
5301 	 * bypass file security, in the same way as shmem_kernel_file_setup().
5302 	 */
5303 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
5304 	if (IS_ERR(file))
5305 		return PTR_ERR(file);
5306 
5307 	if (vma->vm_file)
5308 		fput(vma->vm_file);
5309 	vma->vm_file = file;
5310 	vma->vm_ops = &shmem_anon_vm_ops;
5311 
5312 	return 0;
5313 }
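
/*
 * Illustrative note, not from the original source: this is the path taken
 * for shared anonymous mappings, e.g. a userspace call such as
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * reaches here via do_mmap(), leaving the vma backed by an unlinked
 * tmpfs file named "dev/zero".
 */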
5314 
5315 /**
5316  * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5317  * @mapping:	the folio's address_space
5318  * @index:	the folio index
5319  * @gfp:	the page allocator flags to use if allocating
5320  *
5321  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
5322  * with any new page allocations done using the specified allocation flags.
5323  * But read_cache_page_gfp() uses the ->read_folio() method, which does not
5324  * suit tmpfs, since it may have pages in swapcache, and needs to find those
5325  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
5326  *
5327  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
5328  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
5329  */
5330 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
5331 		pgoff_t index, gfp_t gfp)
5332 {
5333 #ifdef CONFIG_SHMEM
5334 	struct inode *inode = mapping->host;
5335 	struct folio *folio;
5336 	int error;
5337 
5338 	error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
5339 				    gfp, NULL, NULL);
5340 	if (error)
5341 		return ERR_PTR(error);
5342 
5343 	folio_unlock(folio);
5344 	return folio;
5345 #else
5346 	/*
5347 	 * The tiny !SHMEM case uses ramfs without swap
5348 	 */
5349 	return mapping_read_folio_gfp(mapping, index, gfp);
5350 #endif
5351 }
5352 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
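
/*
 * Illustrative usage sketch, not from the original source: a driver
 * following the i915 pattern mentioned above would read object pages
 * with relaxed reclaim flags,
 *
 *	folio = shmem_read_folio_gfp(mapping, index,
 *			mapping_gfp_mask(mapping) |
 *			__GFP_NORETRY | __GFP_NOWARN);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *
 * so a failed allocation degrades gracefully instead of invoking the
 * OOM killer.
 */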
5353 
5354 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
5355 					 pgoff_t index, gfp_t gfp)
5356 {
5357 	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
5358 	struct page *page;
5359 
5360 	if (IS_ERR(folio))
5361 		return &folio->page;
5362 
5363 	page = folio_file_page(folio, index);
5364 	if (PageHWPoison(page)) {
5365 		folio_put(folio);
5366 		return ERR_PTR(-EIO);
5367 	}
5368 
5369 	return page;
5370 }
5371 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
5372