xref: /linux/mm/shmem.c (revision c9fdc4d5487a16bd1f003fc8b66e91f88efb50e6)
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *		 2000 Transmeta Corp.
6  *		 2000-2001 Christoph Rohland
7  *		 2000-2001 SAP AG
8  *		 2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23 
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/mm.h>
32 #include <linux/random.h>
33 #include <linux/sched/signal.h>
34 #include <linux/export.h>
35 #include <linux/swap.h>
36 #include <linux/uio.h>
37 #include <linux/khugepaged.h>
38 #include <linux/hugetlb.h>
39 #include <linux/frontswap.h>
40 #include <linux/fs_parser.h>
41 #include <linux/swapfile.h>
42 
43 static struct vfsmount *shm_mnt;
44 
45 #ifdef CONFIG_SHMEM
46 /*
47  * This virtual memory filesystem is heavily based on the ramfs. It
48  * extends ramfs by the ability to use swap and honor resource limits,
49  * which makes it a completely usable filesystem.
50  */
51 
52 #include <linux/xattr.h>
53 #include <linux/exportfs.h>
54 #include <linux/posix_acl.h>
55 #include <linux/posix_acl_xattr.h>
56 #include <linux/mman.h>
57 #include <linux/string.h>
58 #include <linux/slab.h>
59 #include <linux/backing-dev.h>
60 #include <linux/shmem_fs.h>
61 #include <linux/writeback.h>
62 #include <linux/pagevec.h>
63 #include <linux/percpu_counter.h>
64 #include <linux/falloc.h>
65 #include <linux/splice.h>
66 #include <linux/security.h>
67 #include <linux/swapops.h>
68 #include <linux/mempolicy.h>
69 #include <linux/namei.h>
70 #include <linux/ctype.h>
71 #include <linux/migrate.h>
72 #include <linux/highmem.h>
73 #include <linux/seq_file.h>
74 #include <linux/magic.h>
75 #include <linux/syscalls.h>
76 #include <linux/fcntl.h>
77 #include <uapi/linux/memfd.h>
78 #include <linux/userfaultfd_k.h>
79 #include <linux/rmap.h>
80 #include <linux/uuid.h>
81 
82 #include <linux/uaccess.h>
83 
84 #include "internal.h"
85 
86 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
87 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
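
/*
 * For illustration, assuming the common 4K PAGE_SIZE: BLOCKS_PER_PAGE is
 * 4096/512 = 8, matching the 512-byte units of inode->i_blocks; and
 * VM_ACCT() rounds a byte count up to whole pages for memory overcommit
 * accounting, e.g.
 *
 *	VM_ACCT(1)    == 1
 *	VM_ACCT(4096) == 1
 *	VM_ACCT(4097) == 2
 */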
88 
89 /* Pretend that each entry is of this size in a directory's i_size */
90 #define BOGO_DIRENT_SIZE 20
91 
92 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
93 #define SHORT_SYMLINK_LEN 128
94 
95 /*
96  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
97  * inode->i_private (with i_rwsem making sure that it has only one user at
98  * a time): we would prefer not to enlarge the shmem inode just for that.
99  */
100 struct shmem_falloc {
101 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
102 	pgoff_t start;		/* start of range currently being fallocated */
103 	pgoff_t next;		/* the next page offset to be fallocated */
104 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
105 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
106 };
107 
108 struct shmem_options {
109 	unsigned long long blocks;
110 	unsigned long long inodes;
111 	struct mempolicy *mpol;
112 	kuid_t uid;
113 	kgid_t gid;
114 	umode_t mode;
115 	bool full_inums;
116 	int huge;
117 	int seen;
118 #define SHMEM_SEEN_BLOCKS 1
119 #define SHMEM_SEEN_INODES 2
120 #define SHMEM_SEEN_HUGE 4
121 #define SHMEM_SEEN_INUMS 8
122 };
123 
124 #ifdef CONFIG_TMPFS
125 static unsigned long shmem_default_max_blocks(void)
126 {
127 	return totalram_pages() / 2;
128 }
129 
130 static unsigned long shmem_default_max_inodes(void)
131 {
132 	unsigned long nr_pages = totalram_pages();
133 
134 	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
135 }
136 #endif
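
/*
 * Rough illustration of those defaults, assuming 4K pages on a 64-bit
 * machine with 8 GiB of RAM (totalhigh_pages() == 0 without highmem):
 * totalram_pages() is about 2M pages, so a default tmpfs mount allows
 * up to 1M blocks (half of RAM, i.e. 4 GiB of data) and
 * min(2M - 0, 2M / 2) = 1M inodes.
 */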
137 
138 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
139 			     struct page **pagep, enum sgp_type sgp,
140 			     gfp_t gfp, struct vm_area_struct *vma,
141 			     vm_fault_t *fault_type);
142 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
143 		struct page **pagep, enum sgp_type sgp,
144 		gfp_t gfp, struct vm_area_struct *vma,
145 		struct vm_fault *vmf, vm_fault_t *fault_type);
146 
147 int shmem_getpage(struct inode *inode, pgoff_t index,
148 		struct page **pagep, enum sgp_type sgp)
149 {
150 	return shmem_getpage_gfp(inode, index, pagep, sgp,
151 		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
152 }
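
/*
 * A minimal sketch of how a caller uses the helper above to look up (or
 * allocate) one page of a shmem file, for any shmem "inode" and "index":
 *
 *	struct page *page = NULL;
 *	int err = shmem_getpage(inode, index, &page, SGP_CACHE);
 *
 *	if (err)
 *		return err;
 *	...use the page, which comes back locked and referenced...
 *	unlock_page(page);
 *	put_page(page);
 */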
153 
154 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
155 {
156 	return sb->s_fs_info;
157 }
158 
159 /*
160  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
161  * for shared memory and for shared anonymous (/dev/zero) mappings
162  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
163  * consistent with the pre-accounting of private mappings ...
164  */
165 static inline int shmem_acct_size(unsigned long flags, loff_t size)
166 {
167 	return (flags & VM_NORESERVE) ?
168 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
169 }
170 
171 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
172 {
173 	if (!(flags & VM_NORESERVE))
174 		vm_unacct_memory(VM_ACCT(size));
175 }
176 
177 static inline int shmem_reacct_size(unsigned long flags,
178 		loff_t oldsize, loff_t newsize)
179 {
180 	if (!(flags & VM_NORESERVE)) {
181 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
182 			return security_vm_enough_memory_mm(current->mm,
183 					VM_ACCT(newsize) - VM_ACCT(oldsize));
184 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
185 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
186 	}
187 	return 0;
188 }
189 
190 /*
191  * ... whereas tmpfs objects are accounted incrementally as
192  * pages are allocated, in order to allow large sparse files.
193  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
194  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
195  */
196 static inline int shmem_acct_block(unsigned long flags, long pages)
197 {
198 	if (!(flags & VM_NORESERVE))
199 		return 0;
200 
201 	return security_vm_enough_memory_mm(current->mm,
202 			pages * VM_ACCT(PAGE_SIZE));
203 }
204 
205 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
206 {
207 	if (flags & VM_NORESERVE)
208 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
209 }
210 
211 static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
212 {
213 	struct shmem_inode_info *info = SHMEM_I(inode);
214 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
215 
216 	if (shmem_acct_block(info->flags, pages))
217 		return false;
218 
219 	if (sbinfo->max_blocks) {
220 		if (percpu_counter_compare(&sbinfo->used_blocks,
221 					   sbinfo->max_blocks - pages) > 0)
222 			goto unacct;
223 		percpu_counter_add(&sbinfo->used_blocks, pages);
224 	}
225 
226 	return true;
227 
228 unacct:
229 	shmem_unacct_blocks(info->flags, pages);
230 	return false;
231 }
232 
233 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
234 {
235 	struct shmem_inode_info *info = SHMEM_I(inode);
236 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
237 
238 	if (sbinfo->max_blocks)
239 		percpu_counter_sub(&sbinfo->used_blocks, pages);
240 	shmem_unacct_blocks(info->flags, pages);
241 }
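
/*
 * Block accounting must stay balanced: allocation paths charge first and
 * undo the charge if allocation then fails.  A sketch of the shape (the
 * same pattern shmem_alloc_and_acct_page() uses further down):
 *
 *	if (!shmem_inode_acct_block(inode, 1))
 *		return ERR_PTR(-ENOSPC);
 *	page = shmem_alloc_page(gfp, info, index);
 *	if (!page) {
 *		shmem_inode_unacct_blocks(inode, 1);
 *		return ERR_PTR(-ENOMEM);
 *	}
 */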
242 
243 static const struct super_operations shmem_ops;
244 const struct address_space_operations shmem_aops;
245 static const struct file_operations shmem_file_operations;
246 static const struct inode_operations shmem_inode_operations;
247 static const struct inode_operations shmem_dir_inode_operations;
248 static const struct inode_operations shmem_special_inode_operations;
249 static const struct vm_operations_struct shmem_vm_ops;
250 static struct file_system_type shmem_fs_type;
251 
252 bool vma_is_shmem(struct vm_area_struct *vma)
253 {
254 	return vma->vm_ops == &shmem_vm_ops;
255 }
256 
257 static LIST_HEAD(shmem_swaplist);
258 static DEFINE_MUTEX(shmem_swaplist_mutex);
259 
260 /*
261  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
262  * produces a novel ino for the newly allocated inode.
263  *
264  * It may also be called when making a hard link, to account for the space needed by
265  * each dentry. However, in that case, no new inode number is needed since that
266  * internally draws from another pool of inode numbers (currently global
267  * get_next_ino()). This case is indicated by passing NULL as inop.
268  */
269 #define SHMEM_INO_BATCH 1024
270 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
271 {
272 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
273 	ino_t ino;
274 
275 	if (!(sb->s_flags & SB_KERNMOUNT)) {
276 		raw_spin_lock(&sbinfo->stat_lock);
277 		if (sbinfo->max_inodes) {
278 			if (!sbinfo->free_inodes) {
279 				raw_spin_unlock(&sbinfo->stat_lock);
280 				return -ENOSPC;
281 			}
282 			sbinfo->free_inodes--;
283 		}
284 		if (inop) {
285 			ino = sbinfo->next_ino++;
286 			if (unlikely(is_zero_ino(ino)))
287 				ino = sbinfo->next_ino++;
288 			if (unlikely(!sbinfo->full_inums &&
289 				     ino > UINT_MAX)) {
290 				/*
291 				 * Emulate get_next_ino uint wraparound for
292 				 * compatibility
293 				 */
294 				if (IS_ENABLED(CONFIG_64BIT))
295 					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
296 						__func__, MINOR(sb->s_dev));
297 				sbinfo->next_ino = 1;
298 				ino = sbinfo->next_ino++;
299 			}
300 			*inop = ino;
301 		}
302 		raw_spin_unlock(&sbinfo->stat_lock);
303 	} else if (inop) {
304 		/*
305 		 * __shmem_file_setup, one of our callers, takes this lock-free
306 		 * path: shmem_reserve_inode need not take stat_lock for it,
307 		 * since max_inodes is always 0 here, and it may be called from
308 		 * potentially unknown contexts. As such, use a per-cpu batched
309 		 * allocator which doesn't require the per-sb stat_lock unless
310 		 * we are at the batch boundary.
311 		 *
312 		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
313 		 * shmem mounts are not exposed to userspace, so we don't need
314 		 * to worry about things like glibc compatibility.
315 		 */
316 		ino_t *next_ino;
317 
318 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
319 		ino = *next_ino;
320 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
321 			raw_spin_lock(&sbinfo->stat_lock);
322 			ino = sbinfo->next_ino;
323 			sbinfo->next_ino += SHMEM_INO_BATCH;
324 			raw_spin_unlock(&sbinfo->stat_lock);
325 			if (unlikely(is_zero_ino(ino)))
326 				ino++;
327 		}
328 		*inop = ino;
329 		*next_ino = ++ino;
330 		put_cpu();
331 	}
332 
333 	return 0;
334 }
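
/*
 * Worked example of the batching above, with SHMEM_INO_BATCH == 1024:
 * when a CPU's *next_ino reaches a multiple of 1024 (including its
 * initial 0), it takes stat_lock once to claim the range [next_ino,
 * next_ino + 1023], then hands out the remaining inos of that batch
 * locklessly; ino 0 is skipped via is_zero_ino() so it is never returned.
 */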
335 
336 static void shmem_free_inode(struct super_block *sb)
337 {
338 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
339 	if (sbinfo->max_inodes) {
340 		raw_spin_lock(&sbinfo->stat_lock);
341 		sbinfo->free_inodes++;
342 		raw_spin_unlock(&sbinfo->stat_lock);
343 	}
344 }
345 
346 /**
347  * shmem_recalc_inode - recalculate the block usage of an inode
348  * @inode: inode to recalc
349  *
350  * We have to calculate the free blocks since the mm can drop
351  * undirtied hole pages behind our back.
352  *
353  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
354  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
355  *
356  * It has to be called with the spinlock held.
357  */
358 static void shmem_recalc_inode(struct inode *inode)
359 {
360 	struct shmem_inode_info *info = SHMEM_I(inode);
361 	long freed;
362 
363 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
364 	if (freed > 0) {
365 		info->alloced -= freed;
366 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
367 		shmem_inode_unacct_blocks(inode, freed);
368 	}
369 }
370 
371 bool shmem_charge(struct inode *inode, long pages)
372 {
373 	struct shmem_inode_info *info = SHMEM_I(inode);
374 	unsigned long flags;
375 
376 	if (!shmem_inode_acct_block(inode, pages))
377 		return false;
378 
379 	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
380 	inode->i_mapping->nrpages += pages;
381 
382 	spin_lock_irqsave(&info->lock, flags);
383 	info->alloced += pages;
384 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
385 	shmem_recalc_inode(inode);
386 	spin_unlock_irqrestore(&info->lock, flags);
387 
388 	return true;
389 }
390 
391 void shmem_uncharge(struct inode *inode, long pages)
392 {
393 	struct shmem_inode_info *info = SHMEM_I(inode);
394 	unsigned long flags;
395 
396 	/* nrpages adjustment done by __delete_from_page_cache() or caller */
397 
398 	spin_lock_irqsave(&info->lock, flags);
399 	info->alloced -= pages;
400 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
401 	shmem_recalc_inode(inode);
402 	spin_unlock_irqrestore(&info->lock, flags);
403 
404 	shmem_inode_unacct_blocks(inode, pages);
405 }
406 
407 /*
408  * Replace item expected in xarray by a new item, while holding xa_lock.
409  */
410 static int shmem_replace_entry(struct address_space *mapping,
411 			pgoff_t index, void *expected, void *replacement)
412 {
413 	XA_STATE(xas, &mapping->i_pages, index);
414 	void *item;
415 
416 	VM_BUG_ON(!expected);
417 	VM_BUG_ON(!replacement);
418 	item = xas_load(&xas);
419 	if (item != expected)
420 		return -ENOENT;
421 	xas_store(&xas, replacement);
422 	return 0;
423 }
424 
425 /*
426  * Sometimes, before we decide whether to proceed or to fail, we must check
427  * that an entry was not already brought back from swap by a racing thread.
428  *
429  * Checking page is not enough: by the time a SwapCache page is locked, it
430  * might be reused, and again be SwapCache, using the same swap as before.
431  */
432 static bool shmem_confirm_swap(struct address_space *mapping,
433 			       pgoff_t index, swp_entry_t swap)
434 {
435 	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
436 }
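
/*
 * A sketch of the pattern, as used on the error path of
 * shmem_swapin_page() below:
 *
 *	if (!shmem_confirm_swap(mapping, index, swap))
 *		error = -EEXIST;	(raced: entry already changed)
 */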
437 
438 /*
439  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
440  *
441  * SHMEM_HUGE_NEVER:
442  *	disables huge pages for the mount;
443  * SHMEM_HUGE_ALWAYS:
444  *	enables huge pages for the mount;
445  * SHMEM_HUGE_WITHIN_SIZE:
446  *	only allocate huge pages if the page will be fully within i_size,
447  *	also respect fadvise()/madvise() hints;
448  * SHMEM_HUGE_ADVISE:
449  *	only allocate huge pages if requested with fadvise()/madvise();
450  */
451 
452 #define SHMEM_HUGE_NEVER	0
453 #define SHMEM_HUGE_ALWAYS	1
454 #define SHMEM_HUGE_WITHIN_SIZE	2
455 #define SHMEM_HUGE_ADVISE	3
456 
457 /*
458  * Special values.
459  * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
460  *
461  * SHMEM_HUGE_DENY:
462  *	disables huge on shm_mnt and all mounts, for emergency use;
463  * SHMEM_HUGE_FORCE:
464  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
465  *
466  */
467 #define SHMEM_HUGE_DENY		(-1)
468 #define SHMEM_HUGE_FORCE	(-2)
469 
470 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
471 /* ifdef here to avoid bloating shmem.o when not necessary */
472 
473 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
474 
475 bool shmem_is_huge(struct vm_area_struct *vma,
476 		   struct inode *inode, pgoff_t index)
477 {
478 	loff_t i_size;
479 
480 	if (shmem_huge == SHMEM_HUGE_DENY)
481 		return false;
482 	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
483 	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
484 		return false;
485 	if (shmem_huge == SHMEM_HUGE_FORCE)
486 		return true;
487 
488 	switch (SHMEM_SB(inode->i_sb)->huge) {
489 	case SHMEM_HUGE_ALWAYS:
490 		return true;
491 	case SHMEM_HUGE_WITHIN_SIZE:
492 		index = round_up(index + 1, HPAGE_PMD_NR);
493 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
494 		if (i_size >> PAGE_SHIFT >= index)
495 			return true;
496 		fallthrough;
497 	case SHMEM_HUGE_ADVISE:
498 		if (vma && (vma->vm_flags & VM_HUGEPAGE))
499 			return true;
500 		fallthrough;
501 	default:
502 		return false;
503 	}
504 }
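
/*
 * Worked example for the WITHIN_SIZE case, assuming 4K pages and
 * HPAGE_PMD_NR == 512 (x86-64): with a 3 MiB file (768 pages), a fault
 * at index 300 rounds up to 512, and 768 >= 512, so a huge page is
 * used; a fault at index 600 rounds up to 1024 > 768, so it falls
 * through to the ADVISE check instead.
 */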
505 
506 #if defined(CONFIG_SYSFS)
507 static int shmem_parse_huge(const char *str)
508 {
509 	if (!strcmp(str, "never"))
510 		return SHMEM_HUGE_NEVER;
511 	if (!strcmp(str, "always"))
512 		return SHMEM_HUGE_ALWAYS;
513 	if (!strcmp(str, "within_size"))
514 		return SHMEM_HUGE_WITHIN_SIZE;
515 	if (!strcmp(str, "advise"))
516 		return SHMEM_HUGE_ADVISE;
517 	if (!strcmp(str, "deny"))
518 		return SHMEM_HUGE_DENY;
519 	if (!strcmp(str, "force"))
520 		return SHMEM_HUGE_FORCE;
521 	return -EINVAL;
522 }
523 #endif
524 
525 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
526 static const char *shmem_format_huge(int huge)
527 {
528 	switch (huge) {
529 	case SHMEM_HUGE_NEVER:
530 		return "never";
531 	case SHMEM_HUGE_ALWAYS:
532 		return "always";
533 	case SHMEM_HUGE_WITHIN_SIZE:
534 		return "within_size";
535 	case SHMEM_HUGE_ADVISE:
536 		return "advise";
537 	case SHMEM_HUGE_DENY:
538 		return "deny";
539 	case SHMEM_HUGE_FORCE:
540 		return "force";
541 	default:
542 		VM_BUG_ON(1);
543 		return "bad_val";
544 	}
545 }
546 #endif
547 
548 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
549 		struct shrink_control *sc, unsigned long nr_to_split)
550 {
551 	LIST_HEAD(list), *pos, *next;
552 	LIST_HEAD(to_remove);
553 	struct inode *inode;
554 	struct shmem_inode_info *info;
555 	struct page *page;
556 	unsigned long batch = sc ? sc->nr_to_scan : 128;
557 	int split = 0;
558 
559 	if (list_empty(&sbinfo->shrinklist))
560 		return SHRINK_STOP;
561 
562 	spin_lock(&sbinfo->shrinklist_lock);
563 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
564 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
565 
566 		/* pin the inode */
567 		inode = igrab(&info->vfs_inode);
568 
569 		/* inode is about to be evicted */
570 		if (!inode) {
571 			list_del_init(&info->shrinklist);
572 			goto next;
573 		}
574 
575 		/* Check if there's anything to gain */
576 		if (round_up(inode->i_size, PAGE_SIZE) ==
577 				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
578 			list_move(&info->shrinklist, &to_remove);
579 			goto next;
580 		}
581 
582 		list_move(&info->shrinklist, &list);
583 next:
584 		sbinfo->shrinklist_len--;
585 		if (!--batch)
586 			break;
587 	}
588 	spin_unlock(&sbinfo->shrinklist_lock);
589 
590 	list_for_each_safe(pos, next, &to_remove) {
591 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
592 		inode = &info->vfs_inode;
593 		list_del_init(&info->shrinklist);
594 		iput(inode);
595 	}
596 
597 	list_for_each_safe(pos, next, &list) {
598 		int ret;
599 
600 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
601 		inode = &info->vfs_inode;
602 
603 		if (nr_to_split && split >= nr_to_split)
604 			goto move_back;
605 
606 		page = find_get_page(inode->i_mapping,
607 				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
608 		if (!page)
609 			goto drop;
610 
611 		/* No huge page at the end of the file: nothing to split */
612 		if (!PageTransHuge(page)) {
613 			put_page(page);
614 			goto drop;
615 		}
616 
617 		/*
618 		 * Move the inode on the list back to shrinklist if we failed
619 		 * to lock the page at this time.
620 		 *
621 		 * Waiting for the lock may lead to deadlock in the
622 		 * reclaim path.
623 		 */
624 		if (!trylock_page(page)) {
625 			put_page(page);
626 			goto move_back;
627 		}
628 
629 		ret = split_huge_page(page);
630 		unlock_page(page);
631 		put_page(page);
632 
633 		/* If split failed move the inode on the list back to shrinklist */
634 		if (ret)
635 			goto move_back;
636 
637 		split++;
638 drop:
639 		list_del_init(&info->shrinklist);
640 		goto put;
641 move_back:
642 		/*
643 		 * Make sure the inode is either on the global list or deleted
644 		 * from any local list before iput() since it could be deleted
645 		 * in another thread once we put the inode (then the local list
646 		 * is corrupted).
647 		 */
648 		spin_lock(&sbinfo->shrinklist_lock);
649 		list_move(&info->shrinklist, &sbinfo->shrinklist);
650 		sbinfo->shrinklist_len++;
651 		spin_unlock(&sbinfo->shrinklist_lock);
652 put:
653 		iput(inode);
654 	}
655 
656 	return split;
657 }
658 
659 static long shmem_unused_huge_scan(struct super_block *sb,
660 		struct shrink_control *sc)
661 {
662 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
663 
664 	if (!READ_ONCE(sbinfo->shrinklist_len))
665 		return SHRINK_STOP;
666 
667 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
668 }
669 
670 static long shmem_unused_huge_count(struct super_block *sb,
671 		struct shrink_control *sc)
672 {
673 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
674 	return READ_ONCE(sbinfo->shrinklist_len);
675 }
676 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
677 
678 #define shmem_huge SHMEM_HUGE_DENY
679 
680 bool shmem_is_huge(struct vm_area_struct *vma,
681 		   struct inode *inode, pgoff_t index)
682 {
683 	return false;
684 }
685 
686 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
687 		struct shrink_control *sc, unsigned long nr_to_split)
688 {
689 	return 0;
690 }
691 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
692 
693 /*
694  * Like add_to_page_cache_locked, but error if expected item has gone.
695  */
696 static int shmem_add_to_page_cache(struct page *page,
697 				   struct address_space *mapping,
698 				   pgoff_t index, void *expected, gfp_t gfp,
699 				   struct mm_struct *charge_mm)
700 {
701 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
702 	unsigned long i = 0;
703 	unsigned long nr = compound_nr(page);
704 	int error;
705 
706 	VM_BUG_ON_PAGE(PageTail(page), page);
707 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
708 	VM_BUG_ON_PAGE(!PageLocked(page), page);
709 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
710 	VM_BUG_ON(expected && PageTransHuge(page));
711 
712 	page_ref_add(page, nr);
713 	page->mapping = mapping;
714 	page->index = index;
715 
716 	if (!PageSwapCache(page)) {
717 		error = mem_cgroup_charge(page_folio(page), charge_mm, gfp);
718 		if (error) {
719 			if (PageTransHuge(page)) {
720 				count_vm_event(THP_FILE_FALLBACK);
721 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
722 			}
723 			goto error;
724 		}
725 	}
726 	cgroup_throttle_swaprate(page, gfp);
727 
728 	do {
729 		void *entry;
730 		xas_lock_irq(&xas);
731 		entry = xas_find_conflict(&xas);
732 		if (entry != expected)
733 			xas_set_err(&xas, -EEXIST);
734 		xas_create_range(&xas);
735 		if (xas_error(&xas))
736 			goto unlock;
737 next:
738 		xas_store(&xas, page);
739 		if (++i < nr) {
740 			xas_next(&xas);
741 			goto next;
742 		}
743 		if (PageTransHuge(page)) {
744 			count_vm_event(THP_FILE_ALLOC);
745 			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
746 		}
747 		mapping->nrpages += nr;
748 		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
749 		__mod_lruvec_page_state(page, NR_SHMEM, nr);
750 unlock:
751 		xas_unlock_irq(&xas);
752 	} while (xas_nomem(&xas, gfp));
753 
754 	if (xas_error(&xas)) {
755 		error = xas_error(&xas);
756 		goto error;
757 	}
758 
759 	return 0;
760 error:
761 	page->mapping = NULL;
762 	page_ref_sub(page, nr);
763 	return error;
764 }
765 
766 /*
767  * Like delete_from_page_cache, but substitutes swap for page.
768  */
769 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
770 {
771 	struct address_space *mapping = page->mapping;
772 	int error;
773 
774 	VM_BUG_ON_PAGE(PageCompound(page), page);
775 
776 	xa_lock_irq(&mapping->i_pages);
777 	error = shmem_replace_entry(mapping, page->index, page, radswap);
778 	page->mapping = NULL;
779 	mapping->nrpages--;
780 	__dec_lruvec_page_state(page, NR_FILE_PAGES);
781 	__dec_lruvec_page_state(page, NR_SHMEM);
782 	xa_unlock_irq(&mapping->i_pages);
783 	put_page(page);
784 	BUG_ON(error);
785 }
786 
787 /*
788  * Remove swap entry from page cache, free the swap and its page cache.
789  */
790 static int shmem_free_swap(struct address_space *mapping,
791 			   pgoff_t index, void *radswap)
792 {
793 	void *old;
794 
795 	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
796 	if (old != radswap)
797 		return -ENOENT;
798 	free_swap_and_cache(radix_to_swp_entry(radswap));
799 	return 0;
800 }
801 
802 /*
803  * Determine (in bytes) how many of the shmem object's pages mapped by the
804  * given offsets are swapped out.
805  *
806  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
807  * as long as the inode doesn't go away and racy results are not a problem.
808  */
809 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
810 						pgoff_t start, pgoff_t end)
811 {
812 	XA_STATE(xas, &mapping->i_pages, start);
813 	struct page *page;
814 	unsigned long swapped = 0;
815 
816 	rcu_read_lock();
817 	xas_for_each(&xas, page, end - 1) {
818 		if (xas_retry(&xas, page))
819 			continue;
820 		if (xa_is_value(page))
821 			swapped++;
822 
823 		if (need_resched()) {
824 			xas_pause(&xas);
825 			cond_resched_rcu();
826 		}
827 	}
828 
829 	rcu_read_unlock();
830 
831 	return swapped << PAGE_SHIFT;
832 }
833 
834 /*
835  * Determine (in bytes) how many of the shmem object's pages mapped by the
836  * given vma are swapped out.
837  *
838  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
839  * as long as the inode doesn't go away and racy results are not a problem.
840  */
841 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
842 {
843 	struct inode *inode = file_inode(vma->vm_file);
844 	struct shmem_inode_info *info = SHMEM_I(inode);
845 	struct address_space *mapping = inode->i_mapping;
846 	unsigned long swapped;
847 
848 	/* Be careful as we don't hold info->lock */
849 	swapped = READ_ONCE(info->swapped);
850 
851 	/*
852 	 * The easier cases are when the shmem object has nothing in swap, or
853 	 * the vma maps it whole. Then we can simply use the stats that we
854 	 * already track.
855 	 */
856 	if (!swapped)
857 		return 0;
858 
859 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
860 		return swapped << PAGE_SHIFT;
861 
862 	/* Here comes the more involved part */
863 	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
864 					vma->vm_pgoff + vma_pages(vma));
865 }
866 
867 /*
868  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
869  */
870 void shmem_unlock_mapping(struct address_space *mapping)
871 {
872 	struct pagevec pvec;
873 	pgoff_t index = 0;
874 
875 	pagevec_init(&pvec);
876 	/*
877 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
878 	 */
879 	while (!mapping_unevictable(mapping)) {
880 		if (!pagevec_lookup(&pvec, mapping, &index))
881 			break;
882 		check_move_unevictable_pages(&pvec);
883 		pagevec_release(&pvec);
884 		cond_resched();
885 	}
886 }
887 
888 /*
889  * Check whether a hole-punch or truncation needs to split a huge page,
890  * returning true if no split was required, or the split succeeded.
891  *
892  * Eviction (or truncation to 0 size) should never need to split a huge page;
893  * but in rare cases might do so, if shmem_undo_range() failed to trylock on
894  * head, and then succeeded to trylock on tail.
895  *
896  * A split can only succeed when there are no additional references on the
897  * huge page: so the split below relies upon find_get_entries() having stopped
898  * when it found a subpage of the huge page, without getting further references.
899  */
900 static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
901 {
902 	if (!PageTransCompound(page))
903 		return true;
904 
905 	/* Just proceed to delete a huge page wholly within the range punched */
906 	if (PageHead(page) &&
907 	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
908 		return true;
909 
910 	/* Try to split huge page, so we can truly punch the hole or truncate */
911 	return split_huge_page(page) >= 0;
912 }
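
/*
 * Example, assuming HPAGE_PMD_NR == 512: punching indices [0, 512)
 * covers a PMD-sized page at index 0 wholly, so it is deleted without
 * splitting; punching [100, 200) overlaps only part of it, so
 * split_huge_page() must succeed before the hole can really be punched.
 */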
913 
914 /*
915  * Remove range of pages and swap entries from page cache, and free them.
916  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
917  */
918 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
919 								 bool unfalloc)
920 {
921 	struct address_space *mapping = inode->i_mapping;
922 	struct shmem_inode_info *info = SHMEM_I(inode);
923 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
924 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
925 	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
926 	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
927 	struct pagevec pvec;
928 	pgoff_t indices[PAGEVEC_SIZE];
929 	long nr_swaps_freed = 0;
930 	pgoff_t index;
931 	int i;
932 
933 	if (lend == -1)
934 		end = -1;	/* unsigned, so actually very big */
935 
936 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
937 		info->fallocend = start;
938 
939 	pagevec_init(&pvec);
940 	index = start;
941 	while (index < end && find_lock_entries(mapping, index, end - 1,
942 			&pvec, indices)) {
943 		for (i = 0; i < pagevec_count(&pvec); i++) {
944 			struct page *page = pvec.pages[i];
945 
946 			index = indices[i];
947 
948 			if (xa_is_value(page)) {
949 				if (unfalloc)
950 					continue;
951 				nr_swaps_freed += !shmem_free_swap(mapping,
952 								index, page);
953 				continue;
954 			}
955 			index += thp_nr_pages(page) - 1;
956 
957 			if (!unfalloc || !PageUptodate(page))
958 				truncate_inode_page(mapping, page);
959 			unlock_page(page);
960 		}
961 		pagevec_remove_exceptionals(&pvec);
962 		pagevec_release(&pvec);
963 		cond_resched();
964 		index++;
965 	}
966 
967 	if (partial_start) {
968 		struct page *page = NULL;
969 		shmem_getpage(inode, start - 1, &page, SGP_READ);
970 		if (page) {
971 			unsigned int top = PAGE_SIZE;
972 			if (start > end) {
973 				top = partial_end;
974 				partial_end = 0;
975 			}
976 			zero_user_segment(page, partial_start, top);
977 			set_page_dirty(page);
978 			unlock_page(page);
979 			put_page(page);
980 		}
981 	}
982 	if (partial_end) {
983 		struct page *page = NULL;
984 		shmem_getpage(inode, end, &page, SGP_READ);
985 		if (page) {
986 			zero_user_segment(page, 0, partial_end);
987 			set_page_dirty(page);
988 			unlock_page(page);
989 			put_page(page);
990 		}
991 	}
992 	if (start >= end)
993 		return;
994 
995 	index = start;
996 	while (index < end) {
997 		cond_resched();
998 
999 		if (!find_get_entries(mapping, index, end - 1, &pvec,
1000 				indices)) {
1001 			/* If all gone or hole-punch or unfalloc, we're done */
1002 			if (index == start || end != -1)
1003 				break;
1004 			/* But if truncating, restart to make sure all gone */
1005 			index = start;
1006 			continue;
1007 		}
1008 		for (i = 0; i < pagevec_count(&pvec); i++) {
1009 			struct page *page = pvec.pages[i];
1010 
1011 			index = indices[i];
1012 			if (xa_is_value(page)) {
1013 				if (unfalloc)
1014 					continue;
1015 				if (shmem_free_swap(mapping, index, page)) {
1016 					/* Swap was replaced by page: retry */
1017 					index--;
1018 					break;
1019 				}
1020 				nr_swaps_freed++;
1021 				continue;
1022 			}
1023 
1024 			lock_page(page);
1025 
1026 			if (!unfalloc || !PageUptodate(page)) {
1027 				if (page_mapping(page) != mapping) {
1028 					/* Page was replaced by swap: retry */
1029 					unlock_page(page);
1030 					index--;
1031 					break;
1032 				}
1033 				VM_BUG_ON_PAGE(PageWriteback(page), page);
1034 				if (shmem_punch_compound(page, start, end))
1035 					truncate_inode_page(mapping, page);
1036 				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1037 					/* Wipe the page and don't get stuck */
1038 					clear_highpage(page);
1039 					flush_dcache_page(page);
1040 					set_page_dirty(page);
1041 					if (index <
1042 					    round_up(start, HPAGE_PMD_NR))
1043 						start = index + 1;
1044 				}
1045 			}
1046 			unlock_page(page);
1047 		}
1048 		pagevec_remove_exceptionals(&pvec);
1049 		pagevec_release(&pvec);
1050 		index++;
1051 	}
1052 
1053 	spin_lock_irq(&info->lock);
1054 	info->swapped -= nr_swaps_freed;
1055 	shmem_recalc_inode(inode);
1056 	spin_unlock_irq(&info->lock);
1057 }
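
/*
 * Worked example of the range arithmetic above, with 4K pages: punching
 * bytes [1000, 9000) means lstart = 1000, lend = 8999, giving start = 1,
 * end = 2, partial_start = 1000 and partial_end = 808; page 1 is removed
 * whole, while bytes 1000-4095 of page 0 and bytes 0-807 of page 2 are
 * zeroed by the partial-page paths.
 */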
1058 
1059 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1060 {
1061 	shmem_undo_range(inode, lstart, lend, false);
1062 	inode->i_ctime = inode->i_mtime = current_time(inode);
1063 }
1064 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1065 
1066 static int shmem_getattr(struct user_namespace *mnt_userns,
1067 			 const struct path *path, struct kstat *stat,
1068 			 u32 request_mask, unsigned int query_flags)
1069 {
1070 	struct inode *inode = path->dentry->d_inode;
1071 	struct shmem_inode_info *info = SHMEM_I(inode);
1072 
1073 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1074 		spin_lock_irq(&info->lock);
1075 		shmem_recalc_inode(inode);
1076 		spin_unlock_irq(&info->lock);
1077 	}
1078 	generic_fillattr(&init_user_ns, inode, stat);
1079 
1080 	if (shmem_is_huge(NULL, inode, 0))
1081 		stat->blksize = HPAGE_PMD_SIZE;
1082 
1083 	return 0;
1084 }
1085 
1086 static int shmem_setattr(struct user_namespace *mnt_userns,
1087 			 struct dentry *dentry, struct iattr *attr)
1088 {
1089 	struct inode *inode = d_inode(dentry);
1090 	struct shmem_inode_info *info = SHMEM_I(inode);
1091 	int error;
1092 
1093 	error = setattr_prepare(&init_user_ns, dentry, attr);
1094 	if (error)
1095 		return error;
1096 
1097 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1098 		loff_t oldsize = inode->i_size;
1099 		loff_t newsize = attr->ia_size;
1100 
1101 		/* protected by i_rwsem */
1102 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1103 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1104 			return -EPERM;
1105 
1106 		if (newsize != oldsize) {
1107 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
1108 					oldsize, newsize);
1109 			if (error)
1110 				return error;
1111 			i_size_write(inode, newsize);
1112 			inode->i_ctime = inode->i_mtime = current_time(inode);
1113 		}
1114 		if (newsize <= oldsize) {
1115 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1116 			if (oldsize > holebegin)
1117 				unmap_mapping_range(inode->i_mapping,
1118 							holebegin, 0, 1);
1119 			if (info->alloced)
1120 				shmem_truncate_range(inode,
1121 							newsize, (loff_t)-1);
1122 			/* unmap again to remove racily COWed private pages */
1123 			if (oldsize > holebegin)
1124 				unmap_mapping_range(inode->i_mapping,
1125 							holebegin, 0, 1);
1126 		}
1127 	}
1128 
1129 	setattr_copy(&init_user_ns, inode, attr);
1130 	if (attr->ia_valid & ATTR_MODE)
1131 		error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
1132 	return error;
1133 }
1134 
1135 static void shmem_evict_inode(struct inode *inode)
1136 {
1137 	struct shmem_inode_info *info = SHMEM_I(inode);
1138 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1139 
1140 	if (shmem_mapping(inode->i_mapping)) {
1141 		shmem_unacct_size(info->flags, inode->i_size);
1142 		inode->i_size = 0;
1143 		shmem_truncate_range(inode, 0, (loff_t)-1);
1144 		if (!list_empty(&info->shrinklist)) {
1145 			spin_lock(&sbinfo->shrinklist_lock);
1146 			if (!list_empty(&info->shrinklist)) {
1147 				list_del_init(&info->shrinklist);
1148 				sbinfo->shrinklist_len--;
1149 			}
1150 			spin_unlock(&sbinfo->shrinklist_lock);
1151 		}
1152 		while (!list_empty(&info->swaplist)) {
1153 			/* Wait while shmem_unuse() is scanning this inode... */
1154 			wait_var_event(&info->stop_eviction,
1155 				       !atomic_read(&info->stop_eviction));
1156 			mutex_lock(&shmem_swaplist_mutex);
1157 			/* ...but beware of the race if we peeked too early */
1158 			if (!atomic_read(&info->stop_eviction))
1159 				list_del_init(&info->swaplist);
1160 			mutex_unlock(&shmem_swaplist_mutex);
1161 		}
1162 	}
1163 
1164 	simple_xattrs_free(&info->xattrs);
1165 	WARN_ON(inode->i_blocks);
1166 	shmem_free_inode(inode->i_sb);
1167 	clear_inode(inode);
1168 }
1169 
1170 static int shmem_find_swap_entries(struct address_space *mapping,
1171 				   pgoff_t start, unsigned int nr_entries,
1172 				   struct page **entries, pgoff_t *indices,
1173 				   unsigned int type, bool frontswap)
1174 {
1175 	XA_STATE(xas, &mapping->i_pages, start);
1176 	struct page *page;
1177 	swp_entry_t entry;
1178 	unsigned int ret = 0;
1179 
1180 	if (!nr_entries)
1181 		return 0;
1182 
1183 	rcu_read_lock();
1184 	xas_for_each(&xas, page, ULONG_MAX) {
1185 		if (xas_retry(&xas, page))
1186 			continue;
1187 
1188 		if (!xa_is_value(page))
1189 			continue;
1190 
1191 		entry = radix_to_swp_entry(page);
1192 		if (swp_type(entry) != type)
1193 			continue;
1194 		if (frontswap &&
1195 		    !frontswap_test(swap_info[type], swp_offset(entry)))
1196 			continue;
1197 
1198 		indices[ret] = xas.xa_index;
1199 		entries[ret] = page;
1200 
1201 		if (need_resched()) {
1202 			xas_pause(&xas);
1203 			cond_resched_rcu();
1204 		}
1205 		if (++ret == nr_entries)
1206 			break;
1207 	}
1208 	rcu_read_unlock();
1209 
1210 	return ret;
1211 }
1212 
1213 /*
1214  * Move the swapped pages for an inode to page cache. Returns the count
1215  * of pages swapped in, or the error in case of failure.
1216  */
1217 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1218 				    pgoff_t *indices)
1219 {
1220 	int i = 0;
1221 	int ret = 0;
1222 	int error = 0;
1223 	struct address_space *mapping = inode->i_mapping;
1224 
1225 	for (i = 0; i < pvec.nr; i++) {
1226 		struct page *page = pvec.pages[i];
1227 
1228 		if (!xa_is_value(page))
1229 			continue;
1230 		error = shmem_swapin_page(inode, indices[i],
1231 					  &page, SGP_CACHE,
1232 					  mapping_gfp_mask(mapping),
1233 					  NULL, NULL);
1234 		if (error == 0) {
1235 			unlock_page(page);
1236 			put_page(page);
1237 			ret++;
1238 		}
1239 		if (error == -ENOMEM)
1240 			break;
1241 		error = 0;
1242 	}
1243 	return error ? error : ret;
1244 }
1245 
1246 /*
1247  * If swap found in inode, free it and move page from swapcache to filecache.
1248  */
1249 static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1250 			     bool frontswap, unsigned long *fs_pages_to_unuse)
1251 {
1252 	struct address_space *mapping = inode->i_mapping;
1253 	pgoff_t start = 0;
1254 	struct pagevec pvec;
1255 	pgoff_t indices[PAGEVEC_SIZE];
1256 	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1257 	int ret = 0;
1258 
1259 	pagevec_init(&pvec);
1260 	do {
1261 		unsigned int nr_entries = PAGEVEC_SIZE;
1262 
1263 		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1264 			nr_entries = *fs_pages_to_unuse;
1265 
1266 		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1267 						  pvec.pages, indices,
1268 						  type, frontswap);
1269 		if (pvec.nr == 0) {
1270 			ret = 0;
1271 			break;
1272 		}
1273 
1274 		ret = shmem_unuse_swap_entries(inode, pvec, indices);
1275 		if (ret < 0)
1276 			break;
1277 
1278 		if (frontswap_partial) {
1279 			*fs_pages_to_unuse -= ret;
1280 			if (*fs_pages_to_unuse == 0) {
1281 				ret = FRONTSWAP_PAGES_UNUSED;
1282 				break;
1283 			}
1284 		}
1285 
1286 		start = indices[pvec.nr - 1];
1287 	} while (true);
1288 
1289 	return ret;
1290 }
1291 
1292 /*
1293  * Read all the shared memory data that resides in the swap
1294  * device 'type' back into memory, so the swap device can be
1295  * unused.
1296  */
1297 int shmem_unuse(unsigned int type, bool frontswap,
1298 		unsigned long *fs_pages_to_unuse)
1299 {
1300 	struct shmem_inode_info *info, *next;
1301 	int error = 0;
1302 
1303 	if (list_empty(&shmem_swaplist))
1304 		return 0;
1305 
1306 	mutex_lock(&shmem_swaplist_mutex);
1307 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1308 		if (!info->swapped) {
1309 			list_del_init(&info->swaplist);
1310 			continue;
1311 		}
1312 		/*
1313 		 * Drop the swaplist mutex while searching the inode for swap;
1314 		 * but before doing so, make sure shmem_evict_inode() will not
1315 		 * remove placeholder inode from swaplist, nor let it be freed
1316 		 * (igrab() would protect from unlink, but not from unmount).
1317 		 */
1318 		atomic_inc(&info->stop_eviction);
1319 		mutex_unlock(&shmem_swaplist_mutex);
1320 
1321 		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1322 					  fs_pages_to_unuse);
1323 		cond_resched();
1324 
1325 		mutex_lock(&shmem_swaplist_mutex);
1326 		next = list_next_entry(info, swaplist);
1327 		if (!info->swapped)
1328 			list_del_init(&info->swaplist);
1329 		if (atomic_dec_and_test(&info->stop_eviction))
1330 			wake_up_var(&info->stop_eviction);
1331 		if (error)
1332 			break;
1333 	}
1334 	mutex_unlock(&shmem_swaplist_mutex);
1335 
1336 	return error;
1337 }
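
/*
 * The expected caller here is the swapoff path: try_to_unuse() in
 * mm/swapfile.c invokes roughly
 *
 *	error = shmem_unuse(type, frontswap, &pages_to_unuse);
 *
 * before hunting down the remaining non-shmem users of the device.
 */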
1338 
1339 /*
1340  * Move the page from the page cache to the swap cache.
1341  */
1342 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1343 {
1344 	struct shmem_inode_info *info;
1345 	struct address_space *mapping;
1346 	struct inode *inode;
1347 	swp_entry_t swap;
1348 	pgoff_t index;
1349 
1350 	/*
1351 	 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
1352 	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
1353 	 * and its shmem_writeback() needs them to be split when swapping.
1354 	 */
1355 	if (PageTransCompound(page)) {
1356 		/* Ensure the subpages are still dirty */
1357 		SetPageDirty(page);
1358 		if (split_huge_page(page) < 0)
1359 			goto redirty;
1360 		ClearPageDirty(page);
1361 	}
1362 
1363 	BUG_ON(!PageLocked(page));
1364 	mapping = page->mapping;
1365 	index = page->index;
1366 	inode = mapping->host;
1367 	info = SHMEM_I(inode);
1368 	if (info->flags & VM_LOCKED)
1369 		goto redirty;
1370 	if (!total_swap_pages)
1371 		goto redirty;
1372 
1373 	/*
1374 	 * Our capabilities prevent regular writeback or sync from ever calling
1375 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
1376 	 * its underlying filesystem, in which case tmpfs should write out to
1377 	 * swap only in response to memory pressure, and not for the writeback
1378 	 * threads or sync.
1379 	 */
1380 	if (!wbc->for_reclaim) {
1381 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
1382 		goto redirty;
1383 	}
1384 
1385 	/*
1386 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1387 	 * value into swapfile.c, the only way we can correctly account for a
1388 	 * fallocated page arriving here is now to initialize it and write it.
1389 	 *
1390 	 * That's okay for a page already fallocated earlier, but if we have
1391 	 * not yet completed the fallocation, then (a) we want to keep track
1392 	 * of this page in case we have to undo it, and (b) it may not be a
1393 	 * good idea to continue anyway, once we're pushing into swap.  So
1394 	 * reactivate the page, and let shmem_fallocate() quit when too many.
1395 	 */
1396 	if (!PageUptodate(page)) {
1397 		if (inode->i_private) {
1398 			struct shmem_falloc *shmem_falloc;
1399 			spin_lock(&inode->i_lock);
1400 			shmem_falloc = inode->i_private;
1401 			if (shmem_falloc &&
1402 			    !shmem_falloc->waitq &&
1403 			    index >= shmem_falloc->start &&
1404 			    index < shmem_falloc->next)
1405 				shmem_falloc->nr_unswapped++;
1406 			else
1407 				shmem_falloc = NULL;
1408 			spin_unlock(&inode->i_lock);
1409 			if (shmem_falloc)
1410 				goto redirty;
1411 		}
1412 		clear_highpage(page);
1413 		flush_dcache_page(page);
1414 		SetPageUptodate(page);
1415 	}
1416 
1417 	swap = get_swap_page(page);
1418 	if (!swap.val)
1419 		goto redirty;
1420 
1421 	/*
1422 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1423 	 * if it's not already there.  Do it now before the page is
1424 	 * moved to swap cache, when its pagelock no longer protects
1425 	 * the inode from eviction.  But don't unlock the mutex until
1426 	 * we've incremented swapped, because shmem_unuse_inode() will
1427 	 * prune a !swapped inode from the swaplist under this mutex.
1428 	 */
1429 	mutex_lock(&shmem_swaplist_mutex);
1430 	if (list_empty(&info->swaplist))
1431 		list_add(&info->swaplist, &shmem_swaplist);
1432 
1433 	if (add_to_swap_cache(page, swap,
1434 			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1435 			NULL) == 0) {
1436 		spin_lock_irq(&info->lock);
1437 		shmem_recalc_inode(inode);
1438 		info->swapped++;
1439 		spin_unlock_irq(&info->lock);
1440 
1441 		swap_shmem_alloc(swap);
1442 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1443 
1444 		mutex_unlock(&shmem_swaplist_mutex);
1445 		BUG_ON(page_mapped(page));
1446 		swap_writepage(page, wbc);
1447 		return 0;
1448 	}
1449 
1450 	mutex_unlock(&shmem_swaplist_mutex);
1451 	put_swap_page(page, swap);
1452 redirty:
1453 	set_page_dirty(page);
1454 	if (wbc->for_reclaim)
1455 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1456 	unlock_page(page);
1457 	return 0;
1458 }
1459 
1460 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1461 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1462 {
1463 	char buffer[64];
1464 
1465 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1466 		return;		/* show nothing */
1467 
1468 	mpol_to_str(buffer, sizeof(buffer), mpol);
1469 
1470 	seq_printf(seq, ",mpol=%s", buffer);
1471 }
1472 
1473 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1474 {
1475 	struct mempolicy *mpol = NULL;
1476 	if (sbinfo->mpol) {
1477 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1478 		mpol = sbinfo->mpol;
1479 		mpol_get(mpol);
1480 		raw_spin_unlock(&sbinfo->stat_lock);
1481 	}
1482 	return mpol;
1483 }
1484 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1485 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1486 {
1487 }
1488 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1489 {
1490 	return NULL;
1491 }
1492 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1493 #ifndef CONFIG_NUMA
1494 #define vm_policy vm_private_data
1495 #endif
1496 
1497 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1498 		struct shmem_inode_info *info, pgoff_t index)
1499 {
1500 	/* Create a pseudo vma that just contains the policy */
1501 	vma_init(vma, NULL);
1502 	/* Bias interleave by inode number to distribute better across nodes */
1503 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1504 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1505 }
1506 
1507 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1508 {
1509 	/* Drop reference taken by mpol_shared_policy_lookup() */
1510 	mpol_cond_put(vma->vm_policy);
1511 }
1512 
1513 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1514 			struct shmem_inode_info *info, pgoff_t index)
1515 {
1516 	struct vm_area_struct pvma;
1517 	struct page *page;
1518 	struct vm_fault vmf = {
1519 		.vma = &pvma,
1520 	};
1521 
1522 	shmem_pseudo_vma_init(&pvma, info, index);
1523 	page = swap_cluster_readahead(swap, gfp, &vmf);
1524 	shmem_pseudo_vma_destroy(&pvma);
1525 
1526 	return page;
1527 }
1528 
1529 /*
1530  * Make sure huge_gfp is always more limited than limit_gfp.
1531  * Some of the flags set permissions, while others set limitations.
1532  */
1533 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1534 {
1535 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1536 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1537 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1538 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1539 
1540 	/* Allow allocations only from the originally specified zones. */
1541 	result |= zoneflags;
1542 
1543 	/*
1544 	 * Minimize the result gfp by taking the union with the deny flags,
1545 	 * and the intersection of the allow flags.
1546 	 */
1547 	result |= (limit_gfp & denyflags);
1548 	result |= (huge_gfp & limit_gfp) & allowflags;
1549 
1550 	return result;
1551 }
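
/*
 * For example: if limit_gfp lacks __GFP_FS, the result loses __GFP_FS
 * even when huge_gfp allowed it (allow flags are intersected); if
 * limit_gfp carries __GFP_NORETRY, the result gains it (deny flags are
 * unioned); and zone modifiers such as __GFP_DMA32 are always taken
 * from limit_gfp alone.
 */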
1552 
1553 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1554 		struct shmem_inode_info *info, pgoff_t index)
1555 {
1556 	struct vm_area_struct pvma;
1557 	struct address_space *mapping = info->vfs_inode.i_mapping;
1558 	pgoff_t hindex;
1559 	struct page *page;
1560 
1561 	hindex = round_down(index, HPAGE_PMD_NR);
1562 	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1563 								XA_PRESENT))
1564 		return NULL;
1565 
1566 	shmem_pseudo_vma_init(&pvma, info, hindex);
1567 	page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, true);
1568 	shmem_pseudo_vma_destroy(&pvma);
1569 	if (page)
1570 		prep_transhuge_page(page);
1571 	else
1572 		count_vm_event(THP_FILE_FALLBACK);
1573 	return page;
1574 }
1575 
1576 static struct page *shmem_alloc_page(gfp_t gfp,
1577 			struct shmem_inode_info *info, pgoff_t index)
1578 {
1579 	struct vm_area_struct pvma;
1580 	struct page *page;
1581 
1582 	shmem_pseudo_vma_init(&pvma, info, index);
1583 	page = alloc_page_vma(gfp, &pvma, 0);
1584 	shmem_pseudo_vma_destroy(&pvma);
1585 
1586 	return page;
1587 }
1588 
1589 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1590 		struct inode *inode,
1591 		pgoff_t index, bool huge)
1592 {
1593 	struct shmem_inode_info *info = SHMEM_I(inode);
1594 	struct page *page;
1595 	int nr;
1596 	int err = -ENOSPC;
1597 
1598 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1599 		huge = false;
1600 	nr = huge ? HPAGE_PMD_NR : 1;
1601 
1602 	if (!shmem_inode_acct_block(inode, nr))
1603 		goto failed;
1604 
1605 	if (huge)
1606 		page = shmem_alloc_hugepage(gfp, info, index);
1607 	else
1608 		page = shmem_alloc_page(gfp, info, index);
1609 	if (page) {
1610 		__SetPageLocked(page);
1611 		__SetPageSwapBacked(page);
1612 		return page;
1613 	}
1614 
1615 	err = -ENOMEM;
1616 	shmem_inode_unacct_blocks(inode, nr);
1617 failed:
1618 	return ERR_PTR(err);
1619 }
1620 
1621 /*
1622  * When a page is moved from swapcache to shmem filecache (either by the
1623  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1624  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1625  * ignorance of the mapping it belongs to.  If that mapping has special
1626  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1627  * we may need to copy to a suitable page before moving to filecache.
1628  *
1629  * In a future release, this may well be extended to respect cpuset and
1630  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1631  * but for now it is a simple matter of zone.
1632  */
1633 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1634 {
1635 	return page_zonenum(page) > gfp_zone(gfp);
1636 }
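
/*
 * Concretely, for the gma500 case mentioned above: if gfp includes
 * __GFP_DMA32, then gfp_zone(gfp) is ZONE_DMA32, so a swapped-in page
 * that landed in ZONE_NORMAL compares greater, and shmem_replace_page()
 * below copies it into a page from a suitable zone.
 */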
1637 
1638 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1639 				struct shmem_inode_info *info, pgoff_t index)
1640 {
1641 	struct page *oldpage, *newpage;
1642 	struct folio *old, *new;
1643 	struct address_space *swap_mapping;
1644 	swp_entry_t entry;
1645 	pgoff_t swap_index;
1646 	int error;
1647 
1648 	oldpage = *pagep;
1649 	entry.val = page_private(oldpage);
1650 	swap_index = swp_offset(entry);
1651 	swap_mapping = page_mapping(oldpage);
1652 
1653 	/*
1654 	 * We have arrived here because our zones are constrained, so don't
1655 	 * limit chance of success by further cpuset and node constraints.
1656 	 */
1657 	gfp &= ~GFP_CONSTRAINT_MASK;
1658 	newpage = shmem_alloc_page(gfp, info, index);
1659 	if (!newpage)
1660 		return -ENOMEM;
1661 
1662 	get_page(newpage);
1663 	copy_highpage(newpage, oldpage);
1664 	flush_dcache_page(newpage);
1665 
1666 	__SetPageLocked(newpage);
1667 	__SetPageSwapBacked(newpage);
1668 	SetPageUptodate(newpage);
1669 	set_page_private(newpage, entry.val);
1670 	SetPageSwapCache(newpage);
1671 
1672 	/*
1673 	 * Our caller will very soon move newpage out of swapcache, but it's
1674 	 * a nice clean interface for us to replace oldpage by newpage there.
1675 	 */
1676 	xa_lock_irq(&swap_mapping->i_pages);
1677 	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1678 	if (!error) {
1679 		old = page_folio(oldpage);
1680 		new = page_folio(newpage);
1681 		mem_cgroup_migrate(old, new);
1682 		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
1683 		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
1684 	}
1685 	xa_unlock_irq(&swap_mapping->i_pages);
1686 
1687 	if (unlikely(error)) {
1688 		/*
1689 		 * Is this possible?  I think not, now that our callers check
1690 		 * both PageSwapCache and page_private after getting page lock;
1691 		 * but be defensive.  Reverse old to newpage for clear and free.
1692 		 */
1693 		oldpage = newpage;
1694 	} else {
1695 		lru_cache_add(newpage);
1696 		*pagep = newpage;
1697 	}
1698 
1699 	ClearPageSwapCache(oldpage);
1700 	set_page_private(oldpage, 0);
1701 
1702 	unlock_page(oldpage);
1703 	put_page(oldpage);
1704 	put_page(oldpage);
1705 	return error;
1706 }
1707 
1708 /*
1709  * Swap in the page pointed to by *pagep.
1710  * Caller has to make sure that *pagep contains a valid swapped page.
1711  * Returns 0 and the page in *pagep on success. On failure, returns the
1712  * error code and NULL in *pagep.
1713  */
1714 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1715 			     struct page **pagep, enum sgp_type sgp,
1716 			     gfp_t gfp, struct vm_area_struct *vma,
1717 			     vm_fault_t *fault_type)
1718 {
1719 	struct address_space *mapping = inode->i_mapping;
1720 	struct shmem_inode_info *info = SHMEM_I(inode);
1721 	struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
1722 	struct page *page;
1723 	swp_entry_t swap;
1724 	int error;
1725 
1726 	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1727 	swap = radix_to_swp_entry(*pagep);
1728 	*pagep = NULL;
1729 
1730 	/* Look it up and read it in... */
1731 	page = lookup_swap_cache(swap, NULL, 0);
1732 	if (!page) {
1733 		/* Or update major stats only when swapin succeeds?? */
1734 		if (fault_type) {
1735 			*fault_type |= VM_FAULT_MAJOR;
1736 			count_vm_event(PGMAJFAULT);
1737 			count_memcg_event_mm(charge_mm, PGMAJFAULT);
1738 		}
1739 		/* Here we actually start the io */
1740 		page = shmem_swapin(swap, gfp, info, index);
1741 		if (!page) {
1742 			error = -ENOMEM;
1743 			goto failed;
1744 		}
1745 	}
1746 
1747 	/* We have to do this with page locked to prevent races */
1748 	lock_page(page);
1749 	if (!PageSwapCache(page) || page_private(page) != swap.val ||
1750 	    !shmem_confirm_swap(mapping, index, swap)) {
1751 		error = -EEXIST;
1752 		goto unlock;
1753 	}
1754 	if (!PageUptodate(page)) {
1755 		error = -EIO;
1756 		goto failed;
1757 	}
1758 	wait_on_page_writeback(page);
1759 
1760 	/*
1761 	 * Some architectures may have to restore extra metadata to the
1762 	 * physical page after reading from swap.
1763 	 */
1764 	arch_swap_restore(swap, page);
1765 
1766 	if (shmem_should_replace_page(page, gfp)) {
1767 		error = shmem_replace_page(&page, gfp, info, index);
1768 		if (error)
1769 			goto failed;
1770 	}
1771 
1772 	error = shmem_add_to_page_cache(page, mapping, index,
1773 					swp_to_radix_entry(swap), gfp,
1774 					charge_mm);
1775 	if (error)
1776 		goto failed;
1777 
1778 	spin_lock_irq(&info->lock);
1779 	info->swapped--;
1780 	shmem_recalc_inode(inode);
1781 	spin_unlock_irq(&info->lock);
1782 
1783 	if (sgp == SGP_WRITE)
1784 		mark_page_accessed(page);
1785 
1786 	delete_from_swap_cache(page);
1787 	set_page_dirty(page);
1788 	swap_free(swap);
1789 
1790 	*pagep = page;
1791 	return 0;
1792 failed:
1793 	if (!shmem_confirm_swap(mapping, index, swap))
1794 		error = -EEXIST;
1795 unlock:
1796 	if (page) {
1797 		unlock_page(page);
1798 		put_page(page);
1799 	}
1800 
1801 	return error;
1802 }
1803 
1804 /*
1805  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1806  *
1807  * If we allocate a new one we do not mark it dirty. That's up to the
1808  * vm. If we swap it in we mark it dirty since we also free the swap
1809  * entry since a page cannot live in both the swap and page cache.
1810  *
1811  * vma, vmf, and fault_type are only supplied by shmem_fault:
1812  * otherwise they are NULL.
1813  */
1814 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1815 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1816 	struct vm_area_struct *vma, struct vm_fault *vmf,
1817 			vm_fault_t *fault_type)
1818 {
1819 	struct address_space *mapping = inode->i_mapping;
1820 	struct shmem_inode_info *info = SHMEM_I(inode);
1821 	struct shmem_sb_info *sbinfo;
1822 	struct mm_struct *charge_mm;
1823 	struct page *page;
1824 	pgoff_t hindex = index;
1825 	gfp_t huge_gfp;
1826 	int error;
1827 	int once = 0;
1828 	int alloced = 0;
1829 
1830 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1831 		return -EFBIG;
1832 repeat:
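	/*
	 * No type below SGP_WRITE may look beyond i_size: report -EINVAL
	 * rather than instantiate a page past end-of-file.
	 */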
1833 	if (sgp <= SGP_CACHE &&
1834 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1835 		return -EINVAL;
1836 	}
1837 
1838 	sbinfo = SHMEM_SB(inode->i_sb);
1839 	charge_mm = vma ? vma->vm_mm : NULL;
1840 
1841 	page = pagecache_get_page(mapping, index,
1842 					FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0);
1843 
1844 	if (page && vma && userfaultfd_minor(vma)) {
1845 		if (!xa_is_value(page)) {
1846 			unlock_page(page);
1847 			put_page(page);
1848 		}
1849 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
1850 		return 0;
1851 	}
1852 
1853 	if (xa_is_value(page)) {
1854 		error = shmem_swapin_page(inode, index, &page,
1855 					  sgp, gfp, vma, fault_type);
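		/*
		 * -EEXIST means we raced with swapin or swapoff changing
		 * the entry under us: go back and look it up afresh.
		 */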
1856 		if (error == -EEXIST)
1857 			goto repeat;
1858 
1859 		*pagep = page;
1860 		return error;
1861 	}
1862 
1863 	if (page) {
1864 		hindex = page->index;
1865 		if (sgp == SGP_WRITE)
1866 			mark_page_accessed(page);
1867 		if (PageUptodate(page))
1868 			goto out;
1869 		/* fallocated page */
1870 		if (sgp != SGP_READ)
1871 			goto clear;
1872 		unlock_page(page);
1873 		put_page(page);
1874 	}
1875 
1876 	/*
1877 	 * SGP_READ: succeed on hole, with NULL page, letting caller zero.
1878 	 * SGP_NOALLOC: fail on hole, with NULL page, letting caller fail.
1879 	 */
1880 	*pagep = NULL;
1881 	if (sgp == SGP_READ)
1882 		return 0;
1883 	if (sgp == SGP_NOALLOC)
1884 		return -ENOENT;
1885 
1886 	/*
1887 	 * Fast cache lookup and swap lookup did not find it: allocate.
1888 	 */
1889 
1890 	if (vma && userfaultfd_missing(vma)) {
1891 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1892 		return 0;
1893 	}
1894 
1895 	/* Never use a huge page for shmem_symlink() */
1896 	if (S_ISLNK(inode->i_mode))
1897 		goto alloc_nohuge;
1898 	if (!shmem_is_huge(vma, inode, index))
1899 		goto alloc_nohuge;
1900 
1901 	huge_gfp = vma_thp_gfp_mask(vma);
1902 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
1903 	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
1904 	if (IS_ERR(page)) {
1905 alloc_nohuge:
1906 		page = shmem_alloc_and_acct_page(gfp, inode,
1907 						 index, false);
1908 	}
1909 	if (IS_ERR(page)) {
1910 		int retry = 5;
1911 
1912 		error = PTR_ERR(page);
1913 		page = NULL;
1914 		if (error != -ENOSPC)
1915 			goto unlock;
1916 		/*
1917 		 * Try to reclaim some space by splitting a huge page
1918 		 * beyond i_size on the filesystem.
1919 		 */
1920 		while (retry--) {
1921 			int ret;
1922 
1923 			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1924 			if (ret == SHRINK_STOP)
1925 				break;
1926 			if (ret)
1927 				goto alloc_nohuge;
1928 		}
1929 		goto unlock;
1930 	}
1931 
1932 	if (PageTransHuge(page))
1933 		hindex = round_down(index, HPAGE_PMD_NR);
1934 	else
1935 		hindex = index;
1936 
1937 	if (sgp == SGP_WRITE)
1938 		__SetPageReferenced(page);
1939 
1940 	error = shmem_add_to_page_cache(page, mapping, hindex,
1941 					NULL, gfp & GFP_RECLAIM_MASK,
1942 					charge_mm);
1943 	if (error)
1944 		goto unacct;
1945 	lru_cache_add(page);
1946 
1947 	spin_lock_irq(&info->lock);
1948 	info->alloced += compound_nr(page);
1949 	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1950 	shmem_recalc_inode(inode);
1951 	spin_unlock_irq(&info->lock);
1952 	alloced = true;
1953 
1954 	if (PageTransHuge(page) &&
1955 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1956 			hindex + HPAGE_PMD_NR - 1) {
1957 		/*
1958 		 * Part of the huge page is beyond i_size: subject
1959 		 * to shrink under memory pressure.
1960 		 */
1961 		spin_lock(&sbinfo->shrinklist_lock);
1962 		/*
1963 		 * list_empty_careful() defends against unlocked access
1964 		 * to ->shrinklist in shmem_unused_huge_shrink().
1965 		 */
1966 		if (list_empty_careful(&info->shrinklist)) {
1967 			list_add_tail(&info->shrinklist,
1968 				      &sbinfo->shrinklist);
1969 			sbinfo->shrinklist_len++;
1970 		}
1971 		spin_unlock(&sbinfo->shrinklist_lock);
1972 	}
1973 
1974 	/*
1975 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1976 	 */
1977 	if (sgp == SGP_FALLOC)
1978 		sgp = SGP_WRITE;
1979 clear:
1980 	/*
1981 	 * Let SGP_WRITE caller clear ends if write does not fill page;
1982 	 * but SGP_FALLOC on a page fallocated earlier must initialize
1983 	 * it now, lest undo on failure cancel our earlier guarantee.
1984 	 */
1985 	if (sgp != SGP_WRITE && !PageUptodate(page)) {
1986 		int i;
1987 
1988 		for (i = 0; i < compound_nr(page); i++) {
1989 			clear_highpage(page + i);
1990 			flush_dcache_page(page + i);
1991 		}
1992 		SetPageUptodate(page);
1993 	}
1994 
1995 	/* Perhaps the file has been truncated since we checked */
1996 	if (sgp <= SGP_CACHE &&
1997 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1998 		if (alloced) {
1999 			ClearPageDirty(page);
2000 			delete_from_page_cache(page);
2001 			spin_lock_irq(&info->lock);
2002 			shmem_recalc_inode(inode);
2003 			spin_unlock_irq(&info->lock);
2004 		}
2005 		error = -EINVAL;
2006 		goto unlock;
2007 	}
2008 out:
2009 	*pagep = page + index - hindex;
2010 	return 0;
2011 
2012 	/*
2013 	 * Error recovery.
2014 	 */
2015 unacct:
2016 	shmem_inode_unacct_blocks(inode, compound_nr(page));
2017 
2018 	if (PageTransHuge(page)) {
2019 		unlock_page(page);
2020 		put_page(page);
2021 		goto alloc_nohuge;
2022 	}
2023 unlock:
2024 	if (page) {
2025 		unlock_page(page);
2026 		put_page(page);
2027 	}
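	/*
	 * shmem_recalc_inode() can release blocks still accounted to
	 * pages freed behind our back, so -ENOSPC is worth one retry.
	 */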
2028 	if (error == -ENOSPC && !once++) {
2029 		spin_lock_irq(&info->lock);
2030 		shmem_recalc_inode(inode);
2031 		spin_unlock_irq(&info->lock);
2032 		goto repeat;
2033 	}
2034 	if (error == -EEXIST)
2035 		goto repeat;
2036 	return error;
2037 }
2038 
2039 /*
2040  * This is like autoremove_wake_function, but it removes the wait queue
2041  * entry unconditionally - even if something else had already woken the
2042  * target.
2043  */
2044 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2045 {
2046 	int ret = default_wake_function(wait, mode, sync, key);
2047 	list_del_init(&wait->entry);
2048 	return ret;
2049 }
2050 
2051 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2052 {
2053 	struct vm_area_struct *vma = vmf->vma;
2054 	struct inode *inode = file_inode(vma->vm_file);
2055 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2056 	int err;
2057 	vm_fault_t ret = VM_FAULT_LOCKED;
2058 
2059 	/*
2060 	 * Trinity finds that probing a hole which tmpfs is punching can
2061 	 * prevent the hole-punch from ever completing: which in turn
2062 	 * locks writers out with its hold on i_rwsem.  So refrain from
2063 	 * faulting pages into the hole while it's being punched.  Although
2064 	 * shmem_undo_range() does remove the additions, it may be unable to
2065 	 * keep up, as each new page needs its own unmap_mapping_range() call,
2066 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2067 	 *
2068 	 * It does not matter if we sometimes reach this check just before the
2069 	 * hole-punch begins, so that one fault then races with the punch:
2070 	 * we just need to make racing faults a rare case.
2071 	 *
2072 	 * The implementation below would be much simpler if we just used a
2073 	 * standard mutex or completion: but we cannot take i_rwsem in fault,
2074 	 * and bloating every shmem inode for this unlikely case would be sad.
2075 	 */
2076 	if (unlikely(inode->i_private)) {
2077 		struct shmem_falloc *shmem_falloc;
2078 
2079 		spin_lock(&inode->i_lock);
2080 		shmem_falloc = inode->i_private;
2081 		if (shmem_falloc &&
2082 		    shmem_falloc->waitq &&
2083 		    vmf->pgoff >= shmem_falloc->start &&
2084 		    vmf->pgoff < shmem_falloc->next) {
2085 			struct file *fpin;
2086 			wait_queue_head_t *shmem_falloc_waitq;
2087 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2088 
2089 			ret = VM_FAULT_NOPAGE;
2090 			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2091 			if (fpin)
2092 				ret = VM_FAULT_RETRY;
2093 
2094 			shmem_falloc_waitq = shmem_falloc->waitq;
2095 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2096 					TASK_UNINTERRUPTIBLE);
2097 			spin_unlock(&inode->i_lock);
2098 			schedule();
2099 
2100 			/*
2101 			 * shmem_falloc_waitq points into the shmem_fallocate()
2102 			 * stack of the hole-punching task: shmem_falloc_waitq
2103 			 * is usually invalid by the time we reach here, but
2104 			 * finish_wait() does not dereference it in that case;
2105 			 * though i_lock is still needed, lest we race with wake_up_all().
2106 			 */
2107 			spin_lock(&inode->i_lock);
2108 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2109 			spin_unlock(&inode->i_lock);
2110 
2111 			if (fpin)
2112 				fput(fpin);
2113 			return ret;
2114 		}
2115 		spin_unlock(&inode->i_lock);
2116 	}
2117 
2118 	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
2119 				  gfp, vma, vmf, &ret);
2120 	if (err)
2121 		return vmf_error(err);
2122 	return ret;
2123 }
2124 
2125 unsigned long shmem_get_unmapped_area(struct file *file,
2126 				      unsigned long uaddr, unsigned long len,
2127 				      unsigned long pgoff, unsigned long flags)
2128 {
2129 	unsigned long (*get_area)(struct file *,
2130 		unsigned long, unsigned long, unsigned long, unsigned long);
2131 	unsigned long addr;
2132 	unsigned long offset;
2133 	unsigned long inflated_len;
2134 	unsigned long inflated_addr;
2135 	unsigned long inflated_offset;
2136 
2137 	if (len > TASK_SIZE)
2138 		return -ENOMEM;
2139 
2140 	get_area = current->mm->get_unmapped_area;
2141 	addr = get_area(file, uaddr, len, pgoff, flags);
2142 
2143 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2144 		return addr;
2145 	if (IS_ERR_VALUE(addr))
2146 		return addr;
2147 	if (addr & ~PAGE_MASK)
2148 		return addr;
2149 	if (addr > TASK_SIZE - len)
2150 		return addr;
2151 
2152 	if (shmem_huge == SHMEM_HUGE_DENY)
2153 		return addr;
2154 	if (len < HPAGE_PMD_SIZE)
2155 		return addr;
2156 	if (flags & MAP_FIXED)
2157 		return addr;
2158 	/*
2159 	 * Our priority is to support MAP_SHARED mapped hugely;
2160 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2161 	 * But if caller specified an address hint and we allocated area there
2162 	 * successfully, respect that as before.
2163 	 */
2164 	if (uaddr == addr)
2165 		return addr;
2166 
2167 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2168 		struct super_block *sb;
2169 
2170 		if (file) {
2171 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2172 			sb = file_inode(file)->i_sb;
2173 		} else {
2174 			/*
2175 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2176 			 * for "/dev/zero", to create a shared anonymous object.
2177 			 */
2178 			if (IS_ERR(shm_mnt))
2179 				return addr;
2180 			sb = shm_mnt->mnt_sb;
2181 		}
2182 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2183 			return addr;
2184 	}
2185 
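	/*
	 * Ask for a larger area, then align within it so that the file
	 * offset and the virtual address agree modulo HPAGE_PMD_SIZE:
	 * only then can the range be mapped by PMDs.
	 */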
2186 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2187 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2188 		return addr;
2189 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2190 		return addr;
2191 
2192 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2193 	if (inflated_len > TASK_SIZE)
2194 		return addr;
2195 	if (inflated_len < len)
2196 		return addr;
2197 
2198 	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
2199 	if (IS_ERR_VALUE(inflated_addr))
2200 		return addr;
2201 	if (inflated_addr & ~PAGE_MASK)
2202 		return addr;
2203 
2204 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2205 	inflated_addr += offset - inflated_offset;
2206 	if (inflated_offset > offset)
2207 		inflated_addr += HPAGE_PMD_SIZE;
2208 
2209 	if (inflated_addr > TASK_SIZE - len)
2210 		return addr;
2211 	return inflated_addr;
2212 }
2213 
2214 #ifdef CONFIG_NUMA
2215 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2216 {
2217 	struct inode *inode = file_inode(vma->vm_file);
2218 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2219 }
2220 
2221 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2222 					  unsigned long addr)
2223 {
2224 	struct inode *inode = file_inode(vma->vm_file);
2225 	pgoff_t index;
2226 
2227 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2228 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2229 }
2230 #endif
2231 
2232 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2233 {
2234 	struct inode *inode = file_inode(file);
2235 	struct shmem_inode_info *info = SHMEM_I(inode);
2236 	int retval = -ENOMEM;
2237 
2238 	/*
2239 	 * What serializes the accesses to info->flags?
2240 	 * ipc_lock_object() when called from shmctl_do_lock(),
2241 	 * no serialization needed when called from shm_destroy().
2242 	 */
2243 	if (lock && !(info->flags & VM_LOCKED)) {
2244 		if (!user_shm_lock(inode->i_size, ucounts))
2245 			goto out_nomem;
2246 		info->flags |= VM_LOCKED;
2247 		mapping_set_unevictable(file->f_mapping);
2248 	}
2249 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2250 		user_shm_unlock(inode->i_size, ucounts);
2251 		info->flags &= ~VM_LOCKED;
2252 		mapping_clear_unevictable(file->f_mapping);
2253 	}
2254 	retval = 0;
2255 
2256 out_nomem:
2257 	return retval;
2258 }
2259 
2260 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2261 {
2262 	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2263 	int ret;
2264 
2265 	ret = seal_check_future_write(info->seals, vma);
2266 	if (ret)
2267 		return ret;
2268 
2269 	/* arm64 - allow memory tagging on RAM-based files */
2270 	vma->vm_flags |= VM_MTE_ALLOWED;
2271 
2272 	file_accessed(file);
2273 	vma->vm_ops = &shmem_vm_ops;
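	/*
	 * Register with khugepaged if the vma covers at least one
	 * PMD-aligned, PMD-sized range that could hold a huge page.
	 */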
2274 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2275 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2276 			(vma->vm_end & HPAGE_PMD_MASK)) {
2277 		khugepaged_enter(vma, vma->vm_flags);
2278 	}
2279 	return 0;
2280 }
2281 
2282 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2283 				     umode_t mode, dev_t dev, unsigned long flags)
2284 {
2285 	struct inode *inode;
2286 	struct shmem_inode_info *info;
2287 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2288 	ino_t ino;
2289 
2290 	if (shmem_reserve_inode(sb, &ino))
2291 		return NULL;
2292 
2293 	inode = new_inode(sb);
2294 	if (inode) {
2295 		inode->i_ino = ino;
2296 		inode_init_owner(&init_user_ns, inode, dir, mode);
2297 		inode->i_blocks = 0;
2298 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2299 		inode->i_generation = prandom_u32();
2300 		info = SHMEM_I(inode);
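		/*
		 * Zero the shmem-specific fields that precede the embedded
		 * vfs_inode, which new_inode() has already initialized.
		 */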
2301 		memset(info, 0, (char *)inode - (char *)info);
2302 		spin_lock_init(&info->lock);
2303 		atomic_set(&info->stop_eviction, 0);
2304 		info->seals = F_SEAL_SEAL;
2305 		info->flags = flags & VM_NORESERVE;
2306 		INIT_LIST_HEAD(&info->shrinklist);
2307 		INIT_LIST_HEAD(&info->swaplist);
2308 		simple_xattrs_init(&info->xattrs);
2309 		cache_no_acl(inode);
2310 		mapping_set_large_folios(inode->i_mapping);
2311 
2312 		switch (mode & S_IFMT) {
2313 		default:
2314 			inode->i_op = &shmem_special_inode_operations;
2315 			init_special_inode(inode, mode, dev);
2316 			break;
2317 		case S_IFREG:
2318 			inode->i_mapping->a_ops = &shmem_aops;
2319 			inode->i_op = &shmem_inode_operations;
2320 			inode->i_fop = &shmem_file_operations;
2321 			mpol_shared_policy_init(&info->policy,
2322 						 shmem_get_sbmpol(sbinfo));
2323 			break;
2324 		case S_IFDIR:
2325 			inc_nlink(inode);
2326 			/* Some things misbehave if size == 0 on a directory */
2327 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
2328 			inode->i_op = &shmem_dir_inode_operations;
2329 			inode->i_fop = &simple_dir_operations;
2330 			break;
2331 		case S_IFLNK:
2332 			/*
2333 			 * Must not load anything in the rbtree,
2334 			 * mpol_free_shared_policy will not be called.
2335 			 */
2336 			mpol_shared_policy_init(&info->policy, NULL);
2337 			break;
2338 		}
2339 
2340 		lockdep_annotate_inode_mutex_key(inode);
2341 	} else
2342 		shmem_free_inode(sb);
2343 	return inode;
2344 }
2345 
2346 #ifdef CONFIG_USERFAULTFD
2347 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2348 			   pmd_t *dst_pmd,
2349 			   struct vm_area_struct *dst_vma,
2350 			   unsigned long dst_addr,
2351 			   unsigned long src_addr,
2352 			   bool zeropage,
2353 			   struct page **pagep)
2354 {
2355 	struct inode *inode = file_inode(dst_vma->vm_file);
2356 	struct shmem_inode_info *info = SHMEM_I(inode);
2357 	struct address_space *mapping = inode->i_mapping;
2358 	gfp_t gfp = mapping_gfp_mask(mapping);
2359 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2360 	void *page_kaddr;
2361 	struct page *page;
2362 	int ret;
2363 	pgoff_t max_off;
2364 
2365 	if (!shmem_inode_acct_block(inode, 1)) {
2366 		/*
2367 		 * We may have got a page, returned -ENOENT triggering a retry,
2368 		 * and now we find ourselves with -ENOMEM. Release the page, to
2369 		 * avoid a BUG_ON in our caller.
2370 		 */
2371 		if (unlikely(*pagep)) {
2372 			put_page(*pagep);
2373 			*pagep = NULL;
2374 		}
2375 		return -ENOMEM;
2376 	}
2377 
2378 	if (!*pagep) {
2379 		ret = -ENOMEM;
2380 		page = shmem_alloc_page(gfp, info, pgoff);
2381 		if (!page)
2382 			goto out_unacct_blocks;
2383 
2384 		if (!zeropage) {	/* COPY */
2385 			page_kaddr = kmap_atomic(page);
2386 			ret = copy_from_user(page_kaddr,
2387 					     (const void __user *)src_addr,
2388 					     PAGE_SIZE);
2389 			kunmap_atomic(page_kaddr);
2390 
2391 			/* faults are disabled under kmap_atomic(): let caller copy outside mmap_lock */
2392 			if (unlikely(ret)) {
2393 				*pagep = page;
2394 				ret = -ENOENT;
2395 				/* don't free the page */
2396 				goto out_unacct_blocks;
2397 			}
2398 		} else {		/* ZEROPAGE */
2399 			clear_highpage(page);
2400 		}
2401 	} else {
2402 		page = *pagep;
2403 		*pagep = NULL;
2404 	}
2405 
2406 	VM_BUG_ON(PageLocked(page));
2407 	VM_BUG_ON(PageSwapBacked(page));
2408 	__SetPageLocked(page);
2409 	__SetPageSwapBacked(page);
2410 	__SetPageUptodate(page);
2411 
2412 	ret = -EFAULT;
2413 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2414 	if (unlikely(pgoff >= max_off))
2415 		goto out_release;
2416 
2417 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2418 				      gfp & GFP_RECLAIM_MASK, dst_mm);
2419 	if (ret)
2420 		goto out_release;
2421 
2422 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
2423 				       page, true, false);
2424 	if (ret)
2425 		goto out_delete_from_cache;
2426 
2427 	spin_lock_irq(&info->lock);
2428 	info->alloced++;
2429 	inode->i_blocks += BLOCKS_PER_PAGE;
2430 	shmem_recalc_inode(inode);
2431 	spin_unlock_irq(&info->lock);
2432 
2433 	unlock_page(page);
2434 	return 0;
2435 out_delete_from_cache:
2436 	delete_from_page_cache(page);
2437 out_release:
2438 	unlock_page(page);
2439 	put_page(page);
2440 out_unacct_blocks:
2441 	shmem_inode_unacct_blocks(inode, 1);
2442 	return ret;
2443 }
2444 #endif /* CONFIG_USERFAULTFD */
2445 
2446 #ifdef CONFIG_TMPFS
2447 static const struct inode_operations shmem_symlink_inode_operations;
2448 static const struct inode_operations shmem_short_symlink_operations;
2449 
2450 #ifdef CONFIG_TMPFS_XATTR
2451 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2452 #else
2453 #define shmem_initxattrs NULL
2454 #endif
2455 
2456 static int
2457 shmem_write_begin(struct file *file, struct address_space *mapping,
2458 			loff_t pos, unsigned len, unsigned flags,
2459 			struct page **pagep, void **fsdata)
2460 {
2461 	struct inode *inode = mapping->host;
2462 	struct shmem_inode_info *info = SHMEM_I(inode);
2463 	pgoff_t index = pos >> PAGE_SHIFT;
2464 	int ret = 0;
2465 
2466 	/* i_rwsem is held by caller */
2467 	if (unlikely(info->seals & (F_SEAL_GROW |
2468 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2469 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2470 			return -EPERM;
2471 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2472 			return -EPERM;
2473 	}
2474 
2475 	ret = shmem_getpage(inode, index, pagep, SGP_WRITE);
2476 
2477 	if (ret)
2478 		return ret;
2479 
2480 	if (PageHWPoison(*pagep)) {
2481 		unlock_page(*pagep);
2482 		put_page(*pagep);
2483 		*pagep = NULL;
2484 		return -EIO;
2485 	}
2486 
2487 	return 0;
2488 }
2489 
2490 static int
2491 shmem_write_end(struct file *file, struct address_space *mapping,
2492 			loff_t pos, unsigned len, unsigned copied,
2493 			struct page *page, void *fsdata)
2494 {
2495 	struct inode *inode = mapping->host;
2496 
2497 	if (pos + copied > inode->i_size)
2498 		i_size_write(inode, pos + copied);
2499 
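	/*
	 * A short copy leaves part of the page uninitialized: zero the
	 * uncopied head and tail (and any sibling subpages of a huge
	 * page) before marking the page uptodate.
	 */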
2500 	if (!PageUptodate(page)) {
2501 		struct page *head = compound_head(page);
2502 		if (PageTransCompound(page)) {
2503 			int i;
2504 
2505 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2506 				if (head + i == page)
2507 					continue;
2508 				clear_highpage(head + i);
2509 				flush_dcache_page(head + i);
2510 			}
2511 		}
2512 		if (copied < PAGE_SIZE) {
2513 			unsigned from = pos & (PAGE_SIZE - 1);
2514 			zero_user_segments(page, 0, from,
2515 					from + copied, PAGE_SIZE);
2516 		}
2517 		SetPageUptodate(head);
2518 	}
2519 	set_page_dirty(page);
2520 	unlock_page(page);
2521 	put_page(page);
2522 
2523 	return copied;
2524 }
2525 
2526 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2527 {
2528 	struct file *file = iocb->ki_filp;
2529 	struct inode *inode = file_inode(file);
2530 	struct address_space *mapping = inode->i_mapping;
2531 	pgoff_t index;
2532 	unsigned long offset;
2533 	enum sgp_type sgp = SGP_READ;
2534 	int error = 0;
2535 	ssize_t retval = 0;
2536 	loff_t *ppos = &iocb->ki_pos;
2537 
2538 	/*
2539 	 * Might this read be for a stacking filesystem?  Then when reading
2540 	 * holes of a sparse file, we actually need to allocate those pages,
2541 	 * and even mark them dirty, so the usage cannot exceed the max_blocks limit.
2542 	 */
2543 	if (!iter_is_iovec(to))
2544 		sgp = SGP_CACHE;
2545 
2546 	index = *ppos >> PAGE_SHIFT;
2547 	offset = *ppos & ~PAGE_MASK;
2548 
2549 	for (;;) {
2550 		struct page *page = NULL;
2551 		pgoff_t end_index;
2552 		unsigned long nr, ret;
2553 		loff_t i_size = i_size_read(inode);
2554 
2555 		end_index = i_size >> PAGE_SHIFT;
2556 		if (index > end_index)
2557 			break;
2558 		if (index == end_index) {
2559 			nr = i_size & ~PAGE_MASK;
2560 			if (nr <= offset)
2561 				break;
2562 		}
2563 
2564 		error = shmem_getpage(inode, index, &page, sgp);
2565 		if (error) {
2566 			if (error == -EINVAL)
2567 				error = 0;
2568 			break;
2569 		}
2570 		if (page) {
2571 			if (sgp == SGP_CACHE)
2572 				set_page_dirty(page);
2573 			unlock_page(page);
2574 
2575 			if (PageHWPoison(page)) {
2576 				put_page(page);
2577 				error = -EIO;
2578 				break;
2579 			}
2580 		}
2581 
2582 		/*
2583 		 * We must evaluate after, since reads (unlike writes)
2584 		 * are called without i_rwsem protection against truncate
2585 		 */
2586 		nr = PAGE_SIZE;
2587 		i_size = i_size_read(inode);
2588 		end_index = i_size >> PAGE_SHIFT;
2589 		if (index == end_index) {
2590 			nr = i_size & ~PAGE_MASK;
2591 			if (nr <= offset) {
2592 				if (page)
2593 					put_page(page);
2594 				break;
2595 			}
2596 		}
2597 		nr -= offset;
2598 
2599 		if (page) {
2600 			/*
2601 			 * If users can be writing to this page using arbitrary
2602 			 * virtual addresses, take care about potential aliasing
2603 			 * before reading the page on the kernel side.
2604 			 */
2605 			if (mapping_writably_mapped(mapping))
2606 				flush_dcache_page(page);
2607 			/*
2608 			 * Mark the page accessed if we read the beginning.
2609 			 */
2610 			if (!offset)
2611 				mark_page_accessed(page);
2612 		} else {
2613 			page = ZERO_PAGE(0);
2614 			get_page(page);
2615 		}
2616 
2617 		/*
2618 		 * Ok, we have the page, and it's up-to-date, so
2619 		 * now we can copy it to user space...
2620 		 */
2621 		ret = copy_page_to_iter(page, offset, nr, to);
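		/* Advance by the bytes copied, carrying overflow into index */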
2622 		retval += ret;
2623 		offset += ret;
2624 		index += offset >> PAGE_SHIFT;
2625 		offset &= ~PAGE_MASK;
2626 
2627 		put_page(page);
2628 		if (!iov_iter_count(to))
2629 			break;
2630 		if (ret < nr) {
2631 			error = -EFAULT;
2632 			break;
2633 		}
2634 		cond_resched();
2635 	}
2636 
2637 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2638 	file_accessed(file);
2639 	return retval ? retval : error;
2640 }
2641 
2642 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2643 {
2644 	struct address_space *mapping = file->f_mapping;
2645 	struct inode *inode = mapping->host;
2646 
2647 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2648 		return generic_file_llseek_size(file, offset, whence,
2649 					MAX_LFS_FILESIZE, i_size_read(inode));
2650 	if (offset < 0)
2651 		return -ENXIO;
2652 
2653 	inode_lock(inode);
2654 	/* We're holding i_rwsem so we can access i_size directly */
2655 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
2656 	if (offset >= 0)
2657 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2658 	inode_unlock(inode);
2659 	return offset;
2660 }
2661 
2662 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2663 							 loff_t len)
2664 {
2665 	struct inode *inode = file_inode(file);
2666 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2667 	struct shmem_inode_info *info = SHMEM_I(inode);
2668 	struct shmem_falloc shmem_falloc;
2669 	pgoff_t start, index, end, undo_fallocend;
2670 	int error;
2671 
2672 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2673 		return -EOPNOTSUPP;
2674 
2675 	inode_lock(inode);
2676 
2677 	if (mode & FALLOC_FL_PUNCH_HOLE) {
2678 		struct address_space *mapping = file->f_mapping;
2679 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
2680 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2681 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2682 
2683 		/* protected by i_rwsem */
2684 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2685 			error = -EPERM;
2686 			goto out;
2687 		}
2688 
2689 		shmem_falloc.waitq = &shmem_falloc_waitq;
2690 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2691 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2692 		spin_lock(&inode->i_lock);
2693 		inode->i_private = &shmem_falloc;
2694 		spin_unlock(&inode->i_lock);
2695 
2696 		if ((u64)unmap_end > (u64)unmap_start)
2697 			unmap_mapping_range(mapping, unmap_start,
2698 					    1 + unmap_end - unmap_start, 0);
2699 		shmem_truncate_range(inode, offset, offset + len - 1);
2700 		/* No need to unmap again: hole-punching leaves COWed pages */
2701 
2702 		spin_lock(&inode->i_lock);
2703 		inode->i_private = NULL;
2704 		wake_up_all(&shmem_falloc_waitq);
2705 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2706 		spin_unlock(&inode->i_lock);
2707 		error = 0;
2708 		goto out;
2709 	}
2710 
2711 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2712 	error = inode_newsize_ok(inode, offset + len);
2713 	if (error)
2714 		goto out;
2715 
2716 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2717 		error = -EPERM;
2718 		goto out;
2719 	}
2720 
2721 	start = offset >> PAGE_SHIFT;
2722 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2723 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2724 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2725 		error = -ENOSPC;
2726 		goto out;
2727 	}
2728 
2729 	shmem_falloc.waitq = NULL;
2730 	shmem_falloc.start = start;
2731 	shmem_falloc.next  = start;
2732 	shmem_falloc.nr_falloced = 0;
2733 	shmem_falloc.nr_unswapped = 0;
2734 	spin_lock(&inode->i_lock);
2735 	inode->i_private = &shmem_falloc;
2736 	spin_unlock(&inode->i_lock);
2737 
2738 	/*
2739 	 * info->fallocend is only relevant when huge pages might be
2740 	 * involved: to prevent split_huge_page() freeing fallocated
2741 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
2742 	 */
2743 	undo_fallocend = info->fallocend;
2744 	if (info->fallocend < end)
2745 		info->fallocend = end;
2746 
2747 	for (index = start; index < end; ) {
2748 		struct page *page;
2749 
2750 		/*
2751 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2752 		 * been interrupted because we are using up too much memory;
		 * and if shmem_writepage() has refused to swap out more pages
		 * than we managed to fallocate, we are thrashing: better to
		 * stop with ENOMEM.
2753 		 */
2754 		if (signal_pending(current))
2755 			error = -EINTR;
2756 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2757 			error = -ENOMEM;
2758 		else
2759 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2760 		if (error) {
2761 			info->fallocend = undo_fallocend;
2762 			/* Remove the !PageUptodate pages we added */
2763 			if (index > start) {
2764 				shmem_undo_range(inode,
2765 				    (loff_t)start << PAGE_SHIFT,
2766 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
2767 			}
2768 			goto undone;
2769 		}
2770 
2771 		index++;
2772 		/*
2773 		 * Here is a more important optimization than it appears:
2774 		 * a second SGP_FALLOC on the same huge page will clear it,
2775 		 * making it PageUptodate and un-undoable if we fail later.
2776 		 */
2777 		if (PageTransCompound(page)) {
2778 			index = round_up(index, HPAGE_PMD_NR);
2779 			/* Beware 32-bit wraparound */
2780 			if (!index)
2781 				index--;
2782 		}
2783 
2784 		/*
2785 		 * Inform shmem_writepage() how far we have reached.
2786 		 * No need for lock or barrier: we have the page lock.
2787 		 */
2788 		if (!PageUptodate(page))
2789 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
2790 		shmem_falloc.next = index;
2791 
2792 		/*
2793 		 * If !PageUptodate, leave it that way so that freeable pages
2794 		 * can be recognized if we need to rollback on error later.
2795 		 * But set_page_dirty so that memory pressure will swap rather
2796 		 * than free the pages we are allocating (and SGP_CACHE pages
2797 		 * might still be clean: we now need to mark those dirty too).
2798 		 */
2799 		set_page_dirty(page);
2800 		unlock_page(page);
2801 		put_page(page);
2802 		cond_resched();
2803 	}
2804 
2805 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2806 		i_size_write(inode, offset + len);
2807 	inode->i_ctime = current_time(inode);
2808 undone:
2809 	spin_lock(&inode->i_lock);
2810 	inode->i_private = NULL;
2811 	spin_unlock(&inode->i_lock);
2812 out:
2813 	inode_unlock(inode);
2814 	return error;
2815 }
2816 
2817 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2818 {
2819 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2820 
2821 	buf->f_type = TMPFS_MAGIC;
2822 	buf->f_bsize = PAGE_SIZE;
2823 	buf->f_namelen = NAME_MAX;
2824 	if (sbinfo->max_blocks) {
2825 		buf->f_blocks = sbinfo->max_blocks;
2826 		buf->f_bavail =
2827 		buf->f_bfree  = sbinfo->max_blocks -
2828 				percpu_counter_sum(&sbinfo->used_blocks);
2829 	}
2830 	if (sbinfo->max_inodes) {
2831 		buf->f_files = sbinfo->max_inodes;
2832 		buf->f_ffree = sbinfo->free_inodes;
2833 	}
2834 	/* else leave those fields 0 like simple_statfs */
2835 
2836 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
2837 
2838 	return 0;
2839 }
2840 
2841 /*
2842  * File creation. Allocate an inode, and we're done.
2843  */
2844 static int
2845 shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
2846 	    struct dentry *dentry, umode_t mode, dev_t dev)
2847 {
2848 	struct inode *inode;
2849 	int error = -ENOSPC;
2850 
2851 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2852 	if (inode) {
2853 		error = simple_acl_create(dir, inode);
2854 		if (error)
2855 			goto out_iput;
2856 		error = security_inode_init_security(inode, dir,
2857 						     &dentry->d_name,
2858 						     shmem_initxattrs, NULL);
2859 		if (error && error != -EOPNOTSUPP)
2860 			goto out_iput;
2861 
2862 		error = 0;
2863 		dir->i_size += BOGO_DIRENT_SIZE;
2864 		dir->i_ctime = dir->i_mtime = current_time(dir);
2865 		d_instantiate(dentry, inode);
2866 		dget(dentry); /* Extra count - pin the dentry in core */
2867 	}
2868 	return error;
2869 out_iput:
2870 	iput(inode);
2871 	return error;
2872 }
2873 
2874 static int
2875 shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
2876 	      struct dentry *dentry, umode_t mode)
2877 {
2878 	struct inode *inode;
2879 	int error = -ENOSPC;
2880 
2881 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2882 	if (inode) {
2883 		error = security_inode_init_security(inode, dir,
2884 						     NULL,
2885 						     shmem_initxattrs, NULL);
2886 		if (error && error != -EOPNOTSUPP)
2887 			goto out_iput;
2888 		error = simple_acl_create(dir, inode);
2889 		if (error)
2890 			goto out_iput;
2891 		d_tmpfile(dentry, inode);
2892 	}
2893 	return error;
2894 out_iput:
2895 	iput(inode);
2896 	return error;
2897 }
2898 
2899 static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
2900 		       struct dentry *dentry, umode_t mode)
2901 {
2902 	int error;
2903 
2904 	error = shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFDIR, 0);
2905 	if (error)
2906 		return error;
2907 	inc_nlink(dir);
2908 	return 0;
2909 }
2910 
2911 static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir,
2912 			struct dentry *dentry, umode_t mode, bool excl)
2913 {
2914 	return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
2915 }
2916 
2917 /*
2918  * Link a file.
2919  */
2920 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2921 {
2922 	struct inode *inode = d_inode(old_dentry);
2923 	int ret = 0;
2924 
2925 	/*
2926 	 * No ordinary (disk based) filesystem counts links as inodes;
2927 	 * but each new link needs a new dentry, pinning lowmem, and
2928 	 * tmpfs dentries cannot be pruned until they are unlinked.
2929 	 * But if an O_TMPFILE file is linked into the tmpfs, the
2930 	 * first link must skip that, to get the accounting right.
2931 	 */
2932 	if (inode->i_nlink) {
2933 		ret = shmem_reserve_inode(inode->i_sb, NULL);
2934 		if (ret)
2935 			goto out;
2936 	}
2937 
2938 	dir->i_size += BOGO_DIRENT_SIZE;
2939 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2940 	inc_nlink(inode);
2941 	ihold(inode);	/* New dentry reference */
2942 	dget(dentry);		/* Extra pinning count for the created dentry */
2943 	d_instantiate(dentry, inode);
2944 out:
2945 	return ret;
2946 }
2947 
2948 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2949 {
2950 	struct inode *inode = d_inode(dentry);
2951 
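	/*
	 * Each extra hard link holds an inode reservation taken in
	 * shmem_link(): release one when dropping a non-final link.
	 */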
2952 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2953 		shmem_free_inode(inode->i_sb);
2954 
2955 	dir->i_size -= BOGO_DIRENT_SIZE;
2956 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2957 	drop_nlink(inode);
2958 	dput(dentry);	/* Undo the count from "create" - this does all the work */
2959 	return 0;
2960 }
2961 
2962 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2963 {
2964 	if (!simple_empty(dentry))
2965 		return -ENOTEMPTY;
2966 
2967 	drop_nlink(d_inode(dentry));
2968 	drop_nlink(dir);
2969 	return shmem_unlink(dir, dentry);
2970 }
2971 
2972 static int shmem_whiteout(struct user_namespace *mnt_userns,
2973 			  struct inode *old_dir, struct dentry *old_dentry)
2974 {
2975 	struct dentry *whiteout;
2976 	int error;
2977 
2978 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2979 	if (!whiteout)
2980 		return -ENOMEM;
2981 
2982 	error = shmem_mknod(&init_user_ns, old_dir, whiteout,
2983 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2984 	dput(whiteout);
2985 	if (error)
2986 		return error;
2987 
2988 	/*
2989 	 * Cheat and hash the whiteout while the old dentry is still in
2990 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2991 	 *
2992 	 * d_lookup() will consistently find one of them at this point,
2993 	 * which one is not defined, but that isn't even important.
2994 	 */
2995 	d_rehash(whiteout);
2996 	return 0;
2997 }
2998 
2999 /*
3000  * The VFS layer already does all the dentry stuff for rename,
3001  * we just have to decrement the usage count for the target if
3002  * it exists, so that the VFS layer correctly frees it when it
3003  * gets overwritten.
3004  */
3005 static int shmem_rename2(struct user_namespace *mnt_userns,
3006 			 struct inode *old_dir, struct dentry *old_dentry,
3007 			 struct inode *new_dir, struct dentry *new_dentry,
3008 			 unsigned int flags)
3009 {
3010 	struct inode *inode = d_inode(old_dentry);
3011 	int they_are_dirs = S_ISDIR(inode->i_mode);
3012 
3013 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3014 		return -EINVAL;
3015 
3016 	if (flags & RENAME_EXCHANGE)
3017 		return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
3018 
3019 	if (!simple_empty(new_dentry))
3020 		return -ENOTEMPTY;
3021 
3022 	if (flags & RENAME_WHITEOUT) {
3023 		int error;
3024 
3025 		error = shmem_whiteout(&init_user_ns, old_dir, old_dentry);
3026 		if (error)
3027 			return error;
3028 	}
3029 
3030 	if (d_really_is_positive(new_dentry)) {
3031 		(void) shmem_unlink(new_dir, new_dentry);
3032 		if (they_are_dirs) {
3033 			drop_nlink(d_inode(new_dentry));
3034 			drop_nlink(old_dir);
3035 		}
3036 	} else if (they_are_dirs) {
3037 		drop_nlink(old_dir);
3038 		inc_nlink(new_dir);
3039 	}
3040 
3041 	old_dir->i_size -= BOGO_DIRENT_SIZE;
3042 	new_dir->i_size += BOGO_DIRENT_SIZE;
3043 	old_dir->i_ctime = old_dir->i_mtime =
3044 	new_dir->i_ctime = new_dir->i_mtime =
3045 	inode->i_ctime = current_time(old_dir);
3046 	return 0;
3047 }
3048 
3049 static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
3050 			 struct dentry *dentry, const char *symname)
3051 {
3052 	int error;
3053 	int len;
3054 	struct inode *inode;
3055 	struct page *page;
3056 
3057 	len = strlen(symname) + 1;
3058 	if (len > PAGE_SIZE)
3059 		return -ENAMETOOLONG;
3060 
3061 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3062 				VM_NORESERVE);
3063 	if (!inode)
3064 		return -ENOSPC;
3065 
3066 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3067 					     shmem_initxattrs, NULL);
3068 	if (error && error != -EOPNOTSUPP) {
3069 		iput(inode);
3070 		return error;
3071 	}
3072 
3073 	inode->i_size = len-1;
3074 	if (len <= SHORT_SYMLINK_LEN) {
3075 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3076 		if (!inode->i_link) {
3077 			iput(inode);
3078 			return -ENOMEM;
3079 		}
3080 		inode->i_op = &shmem_short_symlink_operations;
3081 	} else {
3082 		inode_nohighmem(inode);
3083 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3084 		if (error) {
3085 			iput(inode);
3086 			return error;
3087 		}
3088 		inode->i_mapping->a_ops = &shmem_aops;
3089 		inode->i_op = &shmem_symlink_inode_operations;
3090 		memcpy(page_address(page), symname, len);
3091 		SetPageUptodate(page);
3092 		set_page_dirty(page);
3093 		unlock_page(page);
3094 		put_page(page);
3095 	}
3096 	dir->i_size += BOGO_DIRENT_SIZE;
3097 	dir->i_ctime = dir->i_mtime = current_time(dir);
3098 	d_instantiate(dentry, inode);
3099 	dget(dentry);
3100 	return 0;
3101 }
3102 
3103 static void shmem_put_link(void *arg)
3104 {
3105 	mark_page_accessed(arg);
3106 	put_page(arg);
3107 }
3108 
3109 static const char *shmem_get_link(struct dentry *dentry,
3110 				  struct inode *inode,
3111 				  struct delayed_call *done)
3112 {
3113 	struct page *page = NULL;
3114 	int error;
3115 	if (!dentry) {
3116 		page = find_get_page(inode->i_mapping, 0);
3117 		if (!page)
3118 			return ERR_PTR(-ECHILD);
3119 		if (PageHWPoison(page) ||
3120 		    !PageUptodate(page)) {
3121 			put_page(page);
3122 			return ERR_PTR(-ECHILD);
3123 		}
3124 	} else {
3125 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3126 		if (error)
3127 			return ERR_PTR(error);
3128 		if (!page)
3129 			return ERR_PTR(-ECHILD);
3130 		if (PageHWPoison(page)) {
3131 			unlock_page(page);
3132 			put_page(page);
3133 			return ERR_PTR(-ECHILD);
3134 		}
3135 		unlock_page(page);
3136 	}
3137 	set_delayed_call(done, shmem_put_link, page);
3138 	return page_address(page);
3139 }
3140 
3141 #ifdef CONFIG_TMPFS_XATTR
3142 /*
3143  * Superblocks without xattr inode operations may get some security.* xattr
3144  * support from the LSM "for free". As soon as we have any other xattrs
3145  * like ACLs, we also need to implement the security.* handlers at
3146  * filesystem level, though.
3147  */
3148 
3149 /*
3150  * Callback for security_inode_init_security() for acquiring xattrs.
3151  */
3152 static int shmem_initxattrs(struct inode *inode,
3153 			    const struct xattr *xattr_array,
3154 			    void *fs_info)
3155 {
3156 	struct shmem_inode_info *info = SHMEM_I(inode);
3157 	const struct xattr *xattr;
3158 	struct simple_xattr *new_xattr;
3159 	size_t len;
3160 
3161 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3162 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3163 		if (!new_xattr)
3164 			return -ENOMEM;
3165 
3166 		len = strlen(xattr->name) + 1;
3167 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3168 					  GFP_KERNEL);
3169 		if (!new_xattr->name) {
3170 			kvfree(new_xattr);
3171 			return -ENOMEM;
3172 		}
3173 
3174 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3175 		       XATTR_SECURITY_PREFIX_LEN);
3176 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3177 		       xattr->name, len);
3178 
3179 		simple_xattr_list_add(&info->xattrs, new_xattr);
3180 	}
3181 
3182 	return 0;
3183 }
3184 
3185 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3186 				   struct dentry *unused, struct inode *inode,
3187 				   const char *name, void *buffer, size_t size)
3188 {
3189 	struct shmem_inode_info *info = SHMEM_I(inode);
3190 
3191 	name = xattr_full_name(handler, name);
3192 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3193 }
3194 
3195 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3196 				   struct user_namespace *mnt_userns,
3197 				   struct dentry *unused, struct inode *inode,
3198 				   const char *name, const void *value,
3199 				   size_t size, int flags)
3200 {
3201 	struct shmem_inode_info *info = SHMEM_I(inode);
3202 
3203 	name = xattr_full_name(handler, name);
3204 	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3205 }
3206 
3207 static const struct xattr_handler shmem_security_xattr_handler = {
3208 	.prefix = XATTR_SECURITY_PREFIX,
3209 	.get = shmem_xattr_handler_get,
3210 	.set = shmem_xattr_handler_set,
3211 };
3212 
3213 static const struct xattr_handler shmem_trusted_xattr_handler = {
3214 	.prefix = XATTR_TRUSTED_PREFIX,
3215 	.get = shmem_xattr_handler_get,
3216 	.set = shmem_xattr_handler_set,
3217 };
3218 
3219 static const struct xattr_handler *shmem_xattr_handlers[] = {
3220 #ifdef CONFIG_TMPFS_POSIX_ACL
3221 	&posix_acl_access_xattr_handler,
3222 	&posix_acl_default_xattr_handler,
3223 #endif
3224 	&shmem_security_xattr_handler,
3225 	&shmem_trusted_xattr_handler,
3226 	NULL
3227 };
3228 
3229 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3230 {
3231 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3232 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3233 }
3234 #endif /* CONFIG_TMPFS_XATTR */
3235 
3236 static const struct inode_operations shmem_short_symlink_operations = {
3237 	.get_link	= simple_get_link,
3238 #ifdef CONFIG_TMPFS_XATTR
3239 	.listxattr	= shmem_listxattr,
3240 #endif
3241 };
3242 
3243 static const struct inode_operations shmem_symlink_inode_operations = {
3244 	.get_link	= shmem_get_link,
3245 #ifdef CONFIG_TMPFS_XATTR
3246 	.listxattr	= shmem_listxattr,
3247 #endif
3248 };
3249 
3250 static struct dentry *shmem_get_parent(struct dentry *child)
3251 {
3252 	return ERR_PTR(-ESTALE);
3253 }
3254 
3255 static int shmem_match(struct inode *ino, void *vfh)
3256 {
3257 	__u32 *fh = vfh;
3258 	__u64 inum = fh[2];
3259 	inum = (inum << 32) | fh[1];
3260 	return ino->i_ino == inum && fh[0] == ino->i_generation;
3261 }
3262 
3263 /* Find any alias of inode, but prefer a hashed alias */
3264 static struct dentry *shmem_find_alias(struct inode *inode)
3265 {
3266 	struct dentry *alias = d_find_alias(inode);
3267 
3268 	return alias ?: d_find_any_alias(inode);
3269 }
3270 
3271 
3272 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3273 		struct fid *fid, int fh_len, int fh_type)
3274 {
3275 	struct inode *inode;
3276 	struct dentry *dentry = NULL;
3277 	u64 inum;
3278 
3279 	if (fh_len < 3)
3280 		return NULL;
3281 
3282 	inum = fid->raw[2];
3283 	inum = (inum << 32) | fid->raw[1];
3284 
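	/* Inodes were hashed by i_ino + i_generation in shmem_encode_fh() */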
3285 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3286 			shmem_match, fid->raw);
3287 	if (inode) {
3288 		dentry = shmem_find_alias(inode);
3289 		iput(inode);
3290 	}
3291 
3292 	return dentry;
3293 }
3294 
3295 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3296 				struct inode *parent)
3297 {
3298 	if (*len < 3) {
3299 		*len = 3;
3300 		return FILEID_INVALID;
3301 	}
3302 
3303 	if (inode_unhashed(inode)) {
3304 		/* Unfortunately insert_inode_hash() is not idempotent,
3305 		 * and since we hash inodes here rather than at creation
3306 		 * time, we need a lock to ensure we only try
3307 		 * to do it once.
3308 		 */
3309 		static DEFINE_SPINLOCK(lock);
3310 		spin_lock(&lock);
3311 		if (inode_unhashed(inode))
3312 			__insert_inode_hash(inode,
3313 					    inode->i_ino + inode->i_generation);
3314 		spin_unlock(&lock);
3315 	}
3316 
3317 	fh[0] = inode->i_generation;
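	/* 3-word handle: generation, then low and high 32 bits of i_ino */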
3318 	fh[1] = inode->i_ino;
3319 	fh[2] = ((__u64)inode->i_ino) >> 32;
3320 
3321 	*len = 3;
3322 	return 1;
3323 }
3324 
3325 static const struct export_operations shmem_export_ops = {
3326 	.get_parent     = shmem_get_parent,
3327 	.encode_fh      = shmem_encode_fh,
3328 	.fh_to_dentry	= shmem_fh_to_dentry,
3329 };
3330 
3331 enum shmem_param {
3332 	Opt_gid,
3333 	Opt_huge,
3334 	Opt_mode,
3335 	Opt_mpol,
3336 	Opt_nr_blocks,
3337 	Opt_nr_inodes,
3338 	Opt_size,
3339 	Opt_uid,
3340 	Opt_inode32,
3341 	Opt_inode64,
3342 };
3343 
3344 static const struct constant_table shmem_param_enums_huge[] = {
3345 	{"never",	SHMEM_HUGE_NEVER },
3346 	{"always",	SHMEM_HUGE_ALWAYS },
3347 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
3348 	{"advise",	SHMEM_HUGE_ADVISE },
3349 	{}
3350 };
3351 
3352 const struct fs_parameter_spec shmem_fs_parameters[] = {
3353 	fsparam_u32   ("gid",		Opt_gid),
3354 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
3355 	fsparam_u32oct("mode",		Opt_mode),
3356 	fsparam_string("mpol",		Opt_mpol),
3357 	fsparam_string("nr_blocks",	Opt_nr_blocks),
3358 	fsparam_string("nr_inodes",	Opt_nr_inodes),
3359 	fsparam_string("size",		Opt_size),
3360 	fsparam_u32   ("uid",		Opt_uid),
3361 	fsparam_flag  ("inode32",	Opt_inode32),
3362 	fsparam_flag  ("inode64",	Opt_inode64),
3363 	{}
3364 };
3365 
3366 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3367 {
3368 	struct shmem_options *ctx = fc->fs_private;
3369 	struct fs_parse_result result;
3370 	unsigned long long size;
3371 	char *rest;
3372 	int opt;
3373 
3374 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3375 	if (opt < 0)
3376 		return opt;
3377 
3378 	switch (opt) {
3379 	case Opt_size:
3380 		size = memparse(param->string, &rest);
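		/* A trailing '%' asks for a percentage of total RAM */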
3381 		if (*rest == '%') {
3382 			size <<= PAGE_SHIFT;
3383 			size *= totalram_pages();
3384 			do_div(size, 100);
3385 			rest++;
3386 		}
3387 		if (*rest)
3388 			goto bad_value;
3389 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3390 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3391 		break;
3392 	case Opt_nr_blocks:
3393 		ctx->blocks = memparse(param->string, &rest);
3394 		if (*rest)
3395 			goto bad_value;
3396 		ctx->seen |= SHMEM_SEEN_BLOCKS;
3397 		break;
3398 	case Opt_nr_inodes:
3399 		ctx->inodes = memparse(param->string, &rest);
3400 		if (*rest)
3401 			goto bad_value;
3402 		ctx->seen |= SHMEM_SEEN_INODES;
3403 		break;
3404 	case Opt_mode:
3405 		ctx->mode = result.uint_32 & 07777;
3406 		break;
3407 	case Opt_uid:
3408 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3409 		if (!uid_valid(ctx->uid))
3410 			goto bad_value;
3411 		break;
3412 	case Opt_gid:
3413 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3414 		if (!gid_valid(ctx->gid))
3415 			goto bad_value;
3416 		break;
3417 	case Opt_huge:
3418 		ctx->huge = result.uint_32;
3419 		if (ctx->huge != SHMEM_HUGE_NEVER &&
3420 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3421 		      has_transparent_hugepage()))
3422 			goto unsupported_parameter;
3423 		ctx->seen |= SHMEM_SEEN_HUGE;
3424 		break;
3425 	case Opt_mpol:
3426 		if (IS_ENABLED(CONFIG_NUMA)) {
3427 			mpol_put(ctx->mpol);
3428 			ctx->mpol = NULL;
3429 			if (mpol_parse_str(param->string, &ctx->mpol))
3430 				goto bad_value;
3431 			break;
3432 		}
3433 		goto unsupported_parameter;
3434 	case Opt_inode32:
3435 		ctx->full_inums = false;
3436 		ctx->seen |= SHMEM_SEEN_INUMS;
3437 		break;
3438 	case Opt_inode64:
3439 		if (sizeof(ino_t) < 8) {
3440 			return invalfc(fc,
3441 				       "Cannot use inode64 with <64bit inums in kernel\n");
3442 		}
3443 		ctx->full_inums = true;
3444 		ctx->seen |= SHMEM_SEEN_INUMS;
3445 		break;
3446 	}
3447 	return 0;
3448 
3449 unsupported_parameter:
3450 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
3451 bad_value:
3452 	return invalfc(fc, "Bad value for '%s'", param->key);
3453 }
3454 
3455 static int shmem_parse_options(struct fs_context *fc, void *data)
3456 {
3457 	char *options = data;
3458 
3459 	if (options) {
3460 		int err = security_sb_eat_lsm_opts(options, &fc->security);
3461 		if (err)
3462 			return err;
3463 	}
3464 
3465 	while (options != NULL) {
3466 		char *this_char = options;
3467 		for (;;) {
3468 			/*
3469 			 * NUL-terminate this option: unfortunately,
3470 			 * mount options form a comma-separated list,
3471 			 * but mpol's nodelist may also contain commas;
			 * a comma followed by a digit is taken to be part
			 * of a nodelist, so the scan continues past it.
3472 			 */
3473 			options = strchr(options, ',');
3474 			if (options == NULL)
3475 				break;
3476 			options++;
3477 			if (!isdigit(*options)) {
3478 				options[-1] = '\0';
3479 				break;
3480 			}
3481 		}
3482 		if (*this_char) {
3483 			char *value = strchr(this_char, '=');
3484 			size_t len = 0;
3485 			int err;
3486 
3487 			if (value) {
3488 				*value++ = '\0';
3489 				len = strlen(value);
3490 			}
3491 			err = vfs_parse_fs_string(fc, this_char, value, len);
3492 			if (err < 0)
3493 				return err;
3494 		}
3495 	}
3496 	return 0;
3497 }
3498 
3499 /*
3500  * Reconfigure a shmem filesystem.
3501  *
3502  * Note that we disallow change from limited->unlimited blocks/inodes while any
3503  * are in use; but we must separately disallow unlimited->limited, because in
3504  * that case we have no record of how much is already in use.
3505  */
3506 static int shmem_reconfigure(struct fs_context *fc)
3507 {
3508 	struct shmem_options *ctx = fc->fs_private;
3509 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3510 	unsigned long inodes;
3511 	struct mempolicy *mpol = NULL;
3512 	const char *err;
3513 
3514 	raw_spin_lock(&sbinfo->stat_lock);
3515 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3516 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3517 		if (!sbinfo->max_blocks) {
3518 			err = "Cannot retroactively limit size";
3519 			goto out;
3520 		}
3521 		if (percpu_counter_compare(&sbinfo->used_blocks,
3522 					   ctx->blocks) > 0) {
3523 			err = "Too small a size for current use";
3524 			goto out;
3525 		}
3526 	}
3527 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3528 		if (!sbinfo->max_inodes) {
3529 			err = "Cannot retroactively limit inodes";
3530 			goto out;
3531 		}
3532 		if (ctx->inodes < inodes) {
3533 			err = "Too few inodes for current use";
3534 			goto out;
3535 		}
3536 	}
3537 
3538 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3539 	    sbinfo->next_ino > UINT_MAX) {
3540 		err = "Current inum too high to switch to 32-bit inums";
3541 		goto out;
3542 	}
3543 
3544 	if (ctx->seen & SHMEM_SEEN_HUGE)
3545 		sbinfo->huge = ctx->huge;
3546 	if (ctx->seen & SHMEM_SEEN_INUMS)
3547 		sbinfo->full_inums = ctx->full_inums;
3548 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
3549 		sbinfo->max_blocks  = ctx->blocks;
3550 	if (ctx->seen & SHMEM_SEEN_INODES) {
3551 		sbinfo->max_inodes  = ctx->inodes;
3552 		sbinfo->free_inodes = ctx->inodes - inodes;
3553 	}
3554 
3555 	/*
3556 	 * Preserve previous mempolicy unless mpol remount option was specified.
3557 	 */
3558 	if (ctx->mpol) {
3559 		mpol = sbinfo->mpol;
3560 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
3561 		ctx->mpol = NULL;
3562 	}
3563 	raw_spin_unlock(&sbinfo->stat_lock);
3564 	mpol_put(mpol);
3565 	return 0;
3566 out:
3567 	raw_spin_unlock(&sbinfo->stat_lock);
3568 	return invalfc(fc, "%s", err);
3569 }
3570 
3571 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3572 {
3573 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3574 
3575 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3576 		seq_printf(seq, ",size=%luk",
3577 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3578 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3579 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3580 	if (sbinfo->mode != (0777 | S_ISVTX))
3581 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3582 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3583 		seq_printf(seq, ",uid=%u",
3584 				from_kuid_munged(&init_user_ns, sbinfo->uid));
3585 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3586 		seq_printf(seq, ",gid=%u",
3587 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3588 
3589 	/*
3590 	 * Showing inode{64,32} might be useful even if it's the system default,
3591 	 * since then people don't have to resort to checking both here and
3592 	 * /proc/config.gz (which may not even exist if IKCONFIG_PROC isn't
3593 	 * enabled) to confirm 64-bit inums were successfully applied.
3594 	 *
3595 	 * We hide it when inode64 isn't the default and we are using 32-bit
3596 	 * inodes, since that probably just means the feature isn't even under
3597 	 * consideration.
3598 	 *
3599 	 * As such:
3600 	 *
3601 	 *                     +-----------------+-----------------+
3602 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
3603 	 *  +------------------+-----------------+-----------------+
3604 	 *  | full_inums=true  | show            | show            |
3605 	 *  | full_inums=false | show            | hide            |
3606 	 *  +------------------+-----------------+-----------------+
3607 	 *
3608 	 */
3609 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3610 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3611 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3612 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3613 	if (sbinfo->huge)
3614 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3615 #endif
3616 	shmem_show_mpol(seq, sbinfo->mpol);
3617 	return 0;
3618 }
3619 
3620 #endif /* CONFIG_TMPFS */
3621 
3622 static void shmem_put_super(struct super_block *sb)
3623 {
3624 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3625 
3626 	free_percpu(sbinfo->ino_batch);
3627 	percpu_counter_destroy(&sbinfo->used_blocks);
3628 	mpol_put(sbinfo->mpol);
3629 	kfree(sbinfo);
3630 	sb->s_fs_info = NULL;
3631 }
3632 
3633 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3634 {
3635 	struct shmem_options *ctx = fc->fs_private;
3636 	struct inode *inode;
3637 	struct shmem_sb_info *sbinfo;
3638 
3639 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3640 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3641 				L1_CACHE_BYTES), GFP_KERNEL);
3642 	if (!sbinfo)
3643 		return -ENOMEM;
3644 
3645 	sb->s_fs_info = sbinfo;
3646 
3647 #ifdef CONFIG_TMPFS
3648 	/*
3649 	 * By default we only allow half of the physical RAM per
3650 	 * tmpfs instance, limiting inodes to one per page of lowmem;
3651 	 * but the internal instance is left unlimited.
3652 	 */
3653 	if (!(sb->s_flags & SB_KERNMOUNT)) {
3654 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3655 			ctx->blocks = shmem_default_max_blocks();
3656 		if (!(ctx->seen & SHMEM_SEEN_INODES))
3657 			ctx->inodes = shmem_default_max_inodes();
3658 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
3659 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3660 	} else {
3661 		sb->s_flags |= SB_NOUSER;
3662 	}
3663 	sb->s_export_op = &shmem_export_ops;
3664 	sb->s_flags |= SB_NOSEC;
3665 #else
3666 	sb->s_flags |= SB_NOUSER;
3667 #endif
3668 	sbinfo->max_blocks = ctx->blocks;
3669 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3670 	if (sb->s_flags & SB_KERNMOUNT) {
3671 		sbinfo->ino_batch = alloc_percpu(ino_t);
3672 		if (!sbinfo->ino_batch)
3673 			goto failed;
3674 	}
3675 	sbinfo->uid = ctx->uid;
3676 	sbinfo->gid = ctx->gid;
3677 	sbinfo->full_inums = ctx->full_inums;
3678 	sbinfo->mode = ctx->mode;
3679 	sbinfo->huge = ctx->huge;
3680 	sbinfo->mpol = ctx->mpol;
3681 	ctx->mpol = NULL;
3682 
3683 	raw_spin_lock_init(&sbinfo->stat_lock);
3684 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3685 		goto failed;
3686 	spin_lock_init(&sbinfo->shrinklist_lock);
3687 	INIT_LIST_HEAD(&sbinfo->shrinklist);
3688 
3689 	sb->s_maxbytes = MAX_LFS_FILESIZE;
3690 	sb->s_blocksize = PAGE_SIZE;
3691 	sb->s_blocksize_bits = PAGE_SHIFT;
3692 	sb->s_magic = TMPFS_MAGIC;
3693 	sb->s_op = &shmem_ops;
3694 	sb->s_time_gran = 1;
3695 #ifdef CONFIG_TMPFS_XATTR
3696 	sb->s_xattr = shmem_xattr_handlers;
3697 #endif
3698 #ifdef CONFIG_TMPFS_POSIX_ACL
3699 	sb->s_flags |= SB_POSIXACL;
3700 #endif
3701 	uuid_gen(&sb->s_uuid);
3702 
3703 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3704 	if (!inode)
3705 		goto failed;
3706 	inode->i_uid = sbinfo->uid;
3707 	inode->i_gid = sbinfo->gid;
3708 	sb->s_root = d_make_root(inode);
3709 	if (!sb->s_root)
3710 		goto failed;
3711 	return 0;
3712 
3713 failed:
3714 	shmem_put_super(sb);
3715 	return -ENOMEM;
3716 }
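
/*
 * A minimal userspace sketch (illustrative; "/mnt" and the option values are
 * assumptions): the legacy mount(2) path hands the whole option string to
 * ->parse_monolithic (shmem_parse_options) before shmem_fill_super() above
 * runs; options left unset get the defaults chosen there.
 *
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		return mount("tmpfs", "/mnt", "tmpfs", 0,
 *			     "size=512m,mode=1777,inode64");
 *	}
 */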
3717 
3718 static int shmem_get_tree(struct fs_context *fc)
3719 {
3720 	return get_tree_nodev(fc, shmem_fill_super);
3721 }
3722 
3723 static void shmem_free_fc(struct fs_context *fc)
3724 {
3725 	struct shmem_options *ctx = fc->fs_private;
3726 
3727 	if (ctx) {
3728 		mpol_put(ctx->mpol);
3729 		kfree(ctx);
3730 	}
3731 }
3732 
3733 static const struct fs_context_operations shmem_fs_context_ops = {
3734 	.free			= shmem_free_fc,
3735 	.get_tree		= shmem_get_tree,
3736 #ifdef CONFIG_TMPFS
3737 	.parse_monolithic	= shmem_parse_options,
3738 	.parse_param		= shmem_parse_one,
3739 	.reconfigure		= shmem_reconfigure,
3740 #endif
3741 };
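
/*
 * Sketch of the same wiring via the new mount API (assumes a 5.2+ kernel and
 * libc headers exposing SYS_fsopen and friends): each fsconfig() key goes
 * through ->parse_param (shmem_parse_one), and FSCONFIG_CMD_CREATE invokes
 * ->get_tree (shmem_get_tree).
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	int mount_tmpfs_fd(void)
 *	{
 *		int fsfd, mntfd;
 *
 *		fsfd = syscall(SYS_fsopen, "tmpfs", 0);
 *		if (fsfd < 0)
 *			return -1;
 *		// each key/value lands in shmem_parse_one()
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "512m", 0);
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "inode64", NULL, 0);
 *		// create the superblock: shmem_get_tree() -> shmem_fill_super()
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *		mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
 *		close(fsfd);
 *		return mntfd;	// attach with move_mount(2) if desired
 *	}
 */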
3742 
3743 static struct kmem_cache *shmem_inode_cachep;
3744 
3745 static struct inode *shmem_alloc_inode(struct super_block *sb)
3746 {
3747 	struct shmem_inode_info *info;
3748 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3749 	if (!info)
3750 		return NULL;
3751 	return &info->vfs_inode;
3752 }
3753 
3754 static void shmem_free_in_core_inode(struct inode *inode)
3755 {
3756 	if (S_ISLNK(inode->i_mode))
3757 		kfree(inode->i_link);
3758 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3759 }
3760 
3761 static void shmem_destroy_inode(struct inode *inode)
3762 {
3763 	if (S_ISREG(inode->i_mode))
3764 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3765 }
3766 
3767 static void shmem_init_inode(void *foo)
3768 {
3769 	struct shmem_inode_info *info = foo;
3770 	inode_init_once(&info->vfs_inode);
3771 }
3772 
3773 static void shmem_init_inodecache(void)
3774 {
3775 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3776 				sizeof(struct shmem_inode_info),
3777 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3778 }
3779 
3780 static void shmem_destroy_inodecache(void)
3781 {
3782 	kmem_cache_destroy(shmem_inode_cachep);
3783 }
3784 
3785 /* Keep the page in page cache instead of truncating it */
3786 static int shmem_error_remove_page(struct address_space *mapping,
3787 				   struct page *page)
3788 {
3789 	return 0;
3790 }
3791 
3792 const struct address_space_operations shmem_aops = {
3793 	.writepage	= shmem_writepage,
3794 	.set_page_dirty	= __set_page_dirty_no_writeback,
3795 #ifdef CONFIG_TMPFS
3796 	.write_begin	= shmem_write_begin,
3797 	.write_end	= shmem_write_end,
3798 #endif
3799 #ifdef CONFIG_MIGRATION
3800 	.migratepage	= migrate_page,
3801 #endif
3802 	.error_remove_page = shmem_error_remove_page,
3803 };
3804 EXPORT_SYMBOL(shmem_aops);
3805 
3806 static const struct file_operations shmem_file_operations = {
3807 	.mmap		= shmem_mmap,
3808 	.get_unmapped_area = shmem_get_unmapped_area,
3809 #ifdef CONFIG_TMPFS
3810 	.llseek		= shmem_file_llseek,
3811 	.read_iter	= shmem_file_read_iter,
3812 	.write_iter	= generic_file_write_iter,
3813 	.fsync		= noop_fsync,
3814 	.splice_read	= generic_file_splice_read,
3815 	.splice_write	= iter_file_splice_write,
3816 	.fallocate	= shmem_fallocate,
3817 #endif
3818 };
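
/*
 * Userspace sketch (illustrative; needs glibc 2.27+ for the memfd_create()
 * wrapper): memfd_create(2) returns a file on the internal shm_mnt, so the
 * fallocate and mmap below go through shmem_file_operations above.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = memfd_create("example", MFD_CLOEXEC);
 *		char *p;
 *
 *		if (fd < 0)
 *			return 1;
 *		if (fallocate(fd, 0, 0, 4096))	// shmem_fallocate()
 *			return 1;
 *		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		if (p == MAP_FAILED)		// shmem_mmap() set up the vma
 *			return 1;
 *		strcpy(p, "hello tmpfs");	// first touch faults via shmem_fault()
 *		munmap(p, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */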
3819 
3820 static const struct inode_operations shmem_inode_operations = {
3821 	.getattr	= shmem_getattr,
3822 	.setattr	= shmem_setattr,
3823 #ifdef CONFIG_TMPFS_XATTR
3824 	.listxattr	= shmem_listxattr,
3825 	.set_acl	= simple_set_acl,
3826 #endif
3827 };
3828 
3829 static const struct inode_operations shmem_dir_inode_operations = {
3830 #ifdef CONFIG_TMPFS
3831 	.create		= shmem_create,
3832 	.lookup		= simple_lookup,
3833 	.link		= shmem_link,
3834 	.unlink		= shmem_unlink,
3835 	.symlink	= shmem_symlink,
3836 	.mkdir		= shmem_mkdir,
3837 	.rmdir		= shmem_rmdir,
3838 	.mknod		= shmem_mknod,
3839 	.rename		= shmem_rename2,
3840 	.tmpfile	= shmem_tmpfile,
3841 #endif
3842 #ifdef CONFIG_TMPFS_XATTR
3843 	.listxattr	= shmem_listxattr,
3844 #endif
3845 #ifdef CONFIG_TMPFS_POSIX_ACL
3846 	.setattr	= shmem_setattr,
3847 	.set_acl	= simple_set_acl,
3848 #endif
3849 };
3850 
3851 static const struct inode_operations shmem_special_inode_operations = {
3852 #ifdef CONFIG_TMPFS_XATTR
3853 	.listxattr	= shmem_listxattr,
3854 #endif
3855 #ifdef CONFIG_TMPFS_POSIX_ACL
3856 	.setattr	= shmem_setattr,
3857 	.set_acl	= simple_set_acl,
3858 #endif
3859 };
3860 
3861 static const struct super_operations shmem_ops = {
3862 	.alloc_inode	= shmem_alloc_inode,
3863 	.free_inode	= shmem_free_in_core_inode,
3864 	.destroy_inode	= shmem_destroy_inode,
3865 #ifdef CONFIG_TMPFS
3866 	.statfs		= shmem_statfs,
3867 	.show_options	= shmem_show_options,
3868 #endif
3869 	.evict_inode	= shmem_evict_inode,
3870 	.drop_inode	= generic_delete_inode,
3871 	.put_super	= shmem_put_super,
3872 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3873 	.nr_cached_objects	= shmem_unused_huge_count,
3874 	.free_cached_objects	= shmem_unused_huge_scan,
3875 #endif
3876 };
3877 
3878 static const struct vm_operations_struct shmem_vm_ops = {
3879 	.fault		= shmem_fault,
3880 	.map_pages	= filemap_map_pages,
3881 #ifdef CONFIG_NUMA
3882 	.set_policy     = shmem_set_policy,
3883 	.get_policy     = shmem_get_policy,
3884 #endif
3885 };
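
/*
 * Sketch (assumes a NUMA kernel, libnuma's <numaif.h>, and that nodes 0-1
 * exist): mbind(2) on a shared tmpfs mapping reaches shmem_set_policy()
 * above, recording a policy on the file's shared policy tree that later
 * allocations consult via shmem_get_policy().
 *
 *	#include <numaif.h>
 *
 *	int interleave_mapping(void *addr, unsigned long len)
 *	{
 *		unsigned long nodemask = 0x3;	// nodes 0 and 1
 *
 *		return mbind(addr, len, MPOL_INTERLEAVE, &nodemask,
 *			     8 * sizeof(nodemask), 0);
 *	}
 */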
3886 
3887 int shmem_init_fs_context(struct fs_context *fc)
3888 {
3889 	struct shmem_options *ctx;
3890 
3891 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
3892 	if (!ctx)
3893 		return -ENOMEM;
3894 
3895 	ctx->mode = 0777 | S_ISVTX;
3896 	ctx->uid = current_fsuid();
3897 	ctx->gid = current_fsgid();
3898 
3899 	fc->fs_private = ctx;
3900 	fc->ops = &shmem_fs_context_ops;
3901 	return 0;
3902 }
3903 
3904 static struct file_system_type shmem_fs_type = {
3905 	.owner		= THIS_MODULE,
3906 	.name		= "tmpfs",
3907 	.init_fs_context = shmem_init_fs_context,
3908 #ifdef CONFIG_TMPFS
3909 	.parameters	= shmem_fs_parameters,
3910 #endif
3911 	.kill_sb	= kill_litter_super,
3912 	.fs_flags	= FS_USERNS_MOUNT,
3913 };
3914 
3915 int __init shmem_init(void)
3916 {
3917 	int error;
3918 
3919 	shmem_init_inodecache();
3920 
3921 	error = register_filesystem(&shmem_fs_type);
3922 	if (error) {
3923 		pr_err("Could not register tmpfs\n");
3924 		goto out2;
3925 	}
3926 
3927 	shm_mnt = kern_mount(&shmem_fs_type);
3928 	if (IS_ERR(shm_mnt)) {
3929 		error = PTR_ERR(shm_mnt);
3930 		pr_err("Could not kern_mount tmpfs\n");
3931 		goto out1;
3932 	}
3933 
3934 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3935 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3936 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3937 	else
3938 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
3939 #endif
3940 	return 0;
3941 
3942 out1:
3943 	unregister_filesystem(&shmem_fs_type);
3944 out2:
3945 	shmem_destroy_inodecache();
3946 	shm_mnt = ERR_PTR(error);
3947 	return error;
3948 }
3949 
3950 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
3951 static ssize_t shmem_enabled_show(struct kobject *kobj,
3952 				  struct kobj_attribute *attr, char *buf)
3953 {
3954 	static const int values[] = {
3955 		SHMEM_HUGE_ALWAYS,
3956 		SHMEM_HUGE_WITHIN_SIZE,
3957 		SHMEM_HUGE_ADVISE,
3958 		SHMEM_HUGE_NEVER,
3959 		SHMEM_HUGE_DENY,
3960 		SHMEM_HUGE_FORCE,
3961 	};
3962 	int len = 0;
3963 	int i;
3964 
3965 	for (i = 0; i < ARRAY_SIZE(values); i++) {
3966 		len += sysfs_emit_at(buf, len,
3967 				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
3968 				     i ? " " : "",
3969 				     shmem_format_huge(values[i]));
3970 	}
3971 
3972 	len += sysfs_emit_at(buf, len, "\n");
3973 
3974 	return len;
3975 }
3976 
3977 static ssize_t shmem_enabled_store(struct kobject *kobj,
3978 		struct kobj_attribute *attr, const char *buf, size_t count)
3979 {
3980 	char tmp[16];
3981 	int huge;
3982 
3983 	if (count + 1 > sizeof(tmp))
3984 		return -EINVAL;
3985 	memcpy(tmp, buf, count);
3986 	tmp[count] = '\0';
3987 	if (count && tmp[count - 1] == '\n')
3988 		tmp[count - 1] = '\0';
3989 
3990 	huge = shmem_parse_huge(tmp);
3991 	if (huge == -EINVAL)
3992 		return -EINVAL;
3993 	if (!has_transparent_hugepage() &&
3994 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3995 		return -EINVAL;
3996 
3997 	shmem_huge = huge;
3998 	if (shmem_huge > SHMEM_HUGE_DENY)
3999 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4000 	return count;
4001 }
4002 
4003 struct kobj_attribute shmem_enabled_attr =
4004 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4005 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
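
/*
 * Userspace sketch (illustrative): the attribute above is exposed as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled; writing one of the
 * tokens printed by shmem_enabled_show() ends up in shmem_enabled_store().
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int set_shmem_thp(const char *mode)	// e.g. "within_size"
 *	{
 *		int fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
 *			      O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, mode, strlen(mode));
 *		close(fd);
 *		return n < 0 ? -1 : 0;
 *	}
 */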
4006 
4007 #else /* !CONFIG_SHMEM */
4008 
4009 /*
4010  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4011  *
4012  * This is intended for small systems where the benefits of the full
4013  * shmem code (swap-backed and resource-limited) are outweighed by
4014  * their complexity. On systems without swap this code should be
4015  * effectively equivalent, but much lighter weight.
4016  */
4017 
4018 static struct file_system_type shmem_fs_type = {
4019 	.name		= "tmpfs",
4020 	.init_fs_context = ramfs_init_fs_context,
4021 	.parameters	= ramfs_fs_parameters,
4022 	.kill_sb	= kill_litter_super,
4023 	.fs_flags	= FS_USERNS_MOUNT,
4024 };
4025 
4026 int __init shmem_init(void)
4027 {
4028 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4029 
4030 	shm_mnt = kern_mount(&shmem_fs_type);
4031 	BUG_ON(IS_ERR(shm_mnt));
4032 
4033 	return 0;
4034 }
4035 
4036 int shmem_unuse(unsigned int type, bool frontswap,
4037 		unsigned long *fs_pages_to_unuse)
4038 {
4039 	return 0;
4040 }
4041 
4042 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
4043 {
4044 	return 0;
4045 }
4046 
4047 void shmem_unlock_mapping(struct address_space *mapping)
4048 {
4049 }
4050 
4051 #ifdef CONFIG_MMU
4052 unsigned long shmem_get_unmapped_area(struct file *file,
4053 				      unsigned long addr, unsigned long len,
4054 				      unsigned long pgoff, unsigned long flags)
4055 {
4056 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4057 }
4058 #endif
4059 
4060 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4061 {
4062 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4063 }
4064 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4065 
4066 #define shmem_vm_ops				generic_file_vm_ops
4067 #define shmem_file_operations			ramfs_file_operations
4068 #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
4069 #define shmem_acct_size(flags, size)		0
4070 #define shmem_unacct_size(flags, size)		do {} while (0)
4071 
4072 #endif /* CONFIG_SHMEM */
4073 
4074 /* common code */
4075 
4076 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4077 				       unsigned long flags, unsigned int i_flags)
4078 {
4079 	struct inode *inode;
4080 	struct file *res;
4081 
4082 	if (IS_ERR(mnt))
4083 		return ERR_CAST(mnt);
4084 
4085 	if (size < 0 || size > MAX_LFS_FILESIZE)
4086 		return ERR_PTR(-EINVAL);
4087 
4088 	if (shmem_acct_size(flags, size))
4089 		return ERR_PTR(-ENOMEM);
4090 
4091 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4092 				flags);
4093 	if (unlikely(!inode)) {
4094 		shmem_unacct_size(flags, size);
4095 		return ERR_PTR(-ENOSPC);
4096 	}
4097 	inode->i_flags |= i_flags;
4098 	inode->i_size = size;
4099 	clear_nlink(inode);	/* It is unlinked */
4100 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4101 	if (!IS_ERR(res))
4102 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4103 				&shmem_file_operations);
4104 	if (IS_ERR(res))
4105 		iput(inode);
4106 	return res;
4107 }
4108 
4109 /**
4110  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4111  *	kernel internal.  There will be NO LSM permission checks against the
4112  *	underlying inode, so users of this interface must do LSM checks at a
4113  *	higher layer.  The users are the big_key and shm implementations; LSM
4114  *	checks are provided at the key or shm level rather than at the inode.
4115  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4116  * @size: size to be set for the file
4117  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4118  */
4119 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4120 {
4121 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4122 }
4123 
4124 /**
4125  * shmem_file_setup - get an unlinked file living in tmpfs
4126  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4127  * @size: size to be set for the file
4128  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4129  */
4130 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4131 {
4132 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4133 }
4134 EXPORT_SYMBOL_GPL(shmem_file_setup);
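
/*
 * In-kernel sketch (illustrative module code; "data" and "len" are assumed):
 * create an unlinked tmpfs file and fill it with kernel_write().  The final
 * fput() releases the inode, since the file is never linked into a directory.
 *
 *	struct file *file;
 *	loff_t pos = 0;
 *
 *	file = shmem_file_setup("example-buf", SZ_1M, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	kernel_write(file, data, len, &pos);
 *	...
 *	fput(file);
 */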
4135 
4136 /**
4137  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4138  * @mnt: the tmpfs mount where the file will be created
4139  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4140  * @size: size to be set for the file
4141  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4142  */
4143 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4144 				       loff_t size, unsigned long flags)
4145 {
4146 	return __shmem_file_setup(mnt, name, size, flags, 0);
4147 }
4148 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4149 
4150 /**
4151  * shmem_zero_setup - setup a shared anonymous mapping
4152  * @vma: the vma to be mmapped, as prepared by do_mmap
4153  */
4154 int shmem_zero_setup(struct vm_area_struct *vma)
4155 {
4156 	struct file *file;
4157 	loff_t size = vma->vm_end - vma->vm_start;
4158 
4159 	/*
4160 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
4161 	 * between XFS directory reading and selinux: since this file is only
4162 	 * accessible to the user through its mapping, use S_PRIVATE flag to
4163 	 * bypass file security, in the same way as shmem_kernel_file_setup().
4164 	 */
4165 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4166 	if (IS_ERR(file))
4167 		return PTR_ERR(file);
4168 
4169 	if (vma->vm_file)
4170 		fput(vma->vm_file);
4171 	vma->vm_file = file;
4172 	vma->vm_ops = &shmem_vm_ops;
4173 
4174 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4175 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4176 			(vma->vm_end & HPAGE_PMD_MASK)) {
4177 		khugepaged_enter(vma, vma->vm_flags);
4178 	}
4179 
4180 	return 0;
4181 }
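
/*
 * Userspace sketch (illustrative): a shared anonymous mapping is exactly
 * what do_mmap() routes through shmem_zero_setup() above, so these pages
 * are tmpfs-backed even though no file descriptor is involved.
 *
 *	#include <sys/mman.h>
 *
 *	// backed by an unlinked "dev/zero" shmem file, shared across fork()
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 */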
4182 
4183 /**
4184  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4185  * @mapping:	the page's address_space
4186  * @index:	the page index
4187  * @gfp:	the page allocator flags to use if allocating
4188  *
4189  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4190  * with any new page allocations done using the specified allocation flags.
4191  * But read_cache_page_gfp() uses the ->readpage() method, which does not
4192  * suit tmpfs, since tmpfs may have pages in swapcache and needs to find
4193  * those for itself; drivers/gpu/drm i915 and ttm rely upon this support.
4194  *
4195  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4196  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4197  */
4198 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4199 					 pgoff_t index, gfp_t gfp)
4200 {
4201 #ifdef CONFIG_SHMEM
4202 	struct inode *inode = mapping->host;
4203 	struct page *page;
4204 	int error;
4205 
4206 	BUG_ON(!shmem_mapping(mapping));
4207 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4208 				  gfp, NULL, NULL, NULL);
4209 	if (error)
4210 		return ERR_PTR(error);
4211 
4212 	unlock_page(page);
4213 	if (PageHWPoison(page)) {
4214 		put_page(page);
4215 		return ERR_PTR(-EIO);
4216 	}
4217 
4218 	return page;
4219 #else
4220 	/*
4221 	 * The tiny !SHMEM case uses ramfs without swap
4222 	 */
4223 	return read_cache_page_gfp(mapping, index, gfp);
4224 #endif
4225 }
4226 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
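
/*
 * In-kernel sketch (illustrative; "file" and "index" are assumed), in the
 * spirit of the i915 usage noted above: read one page of a shmem file
 * without letting an allocation failure trigger the OOM killer.
 *
 *	struct address_space *mapping = file_inode(file)->i_mapping;
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page;
 *
 *	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... use the page ...
 *	put_page(page);	// drop the cache reference when done
 *	return 0;
 */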
4227