xref: /linux/mm/shmem.c (revision 1b044f1cfc65a7d90b209dfabd57e16d98b58c5b)
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *		 2000 Transmeta Corp.
6  *		 2000-2001 Christoph Rohland
7  *		 2000-2001 SAP AG
8  *		 2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23 
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/mm.h>
32 #include <linux/sched/signal.h>
33 #include <linux/export.h>
34 #include <linux/swap.h>
35 #include <linux/uio.h>
36 #include <linux/khugepaged.h>
37 
38 #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
39 
40 static struct vfsmount *shm_mnt;
41 
42 #ifdef CONFIG_SHMEM
43 /*
44  * This virtual memory filesystem is heavily based on ramfs. It
45  * extends ramfs with the ability to use swap and honor resource limits,
46  * which makes it a completely usable filesystem.
47  */
48 
49 #include <linux/xattr.h>
50 #include <linux/exportfs.h>
51 #include <linux/posix_acl.h>
52 #include <linux/posix_acl_xattr.h>
53 #include <linux/mman.h>
54 #include <linux/string.h>
55 #include <linux/slab.h>
56 #include <linux/backing-dev.h>
57 #include <linux/shmem_fs.h>
58 #include <linux/writeback.h>
59 #include <linux/blkdev.h>
60 #include <linux/pagevec.h>
61 #include <linux/percpu_counter.h>
62 #include <linux/falloc.h>
63 #include <linux/splice.h>
64 #include <linux/security.h>
65 #include <linux/swapops.h>
66 #include <linux/mempolicy.h>
67 #include <linux/namei.h>
68 #include <linux/ctype.h>
69 #include <linux/migrate.h>
70 #include <linux/highmem.h>
71 #include <linux/seq_file.h>
72 #include <linux/magic.h>
73 #include <linux/syscalls.h>
74 #include <linux/fcntl.h>
75 #include <uapi/linux/memfd.h>
76 #include <linux/userfaultfd_k.h>
77 #include <linux/rmap.h>
78 #include <linux/uuid.h>
79 
80 #include <linux/uaccess.h>
81 #include <asm/pgtable.h>
82 
83 #include "internal.h"
84 
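/*
 * inode->i_blocks is counted in 512-byte units, so each page accounts for
 * PAGE_SIZE/512 of them; VM_ACCT() rounds a byte length up to whole pages,
 * the unit used by the overcommit accounting helpers below.
 */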
85 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
86 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
87 
88 /* Pretend that each entry is of this size in directory's i_size */
89 #define BOGO_DIRENT_SIZE 20
90 
91 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
92 #define SHORT_SYMLINK_LEN 128
93 
94 /*
95  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
96  * inode->i_private (with i_mutex making sure that it has only one user at
97  * a time): we would prefer not to enlarge the shmem inode just for that.
98  */
99 struct shmem_falloc {
100 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
101 	pgoff_t start;		/* start of range currently being fallocated */
102 	pgoff_t next;		/* the next page offset to be fallocated */
103 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
104 	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
105 };
106 
107 #ifdef CONFIG_TMPFS
108 static unsigned long shmem_default_max_blocks(void)
109 {
110 	return totalram_pages / 2;
111 }
112 
113 static unsigned long shmem_default_max_inodes(void)
114 {
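	/* shmem inodes come from lowmem: default to at most one per lowmem page, capped at half of RAM */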
115 	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
116 }
117 #endif
118 
119 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
120 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
121 				struct shmem_inode_info *info, pgoff_t index);
122 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
123 		struct page **pagep, enum sgp_type sgp,
124 		gfp_t gfp, struct vm_area_struct *vma,
125 		struct vm_fault *vmf, int *fault_type);
126 
127 int shmem_getpage(struct inode *inode, pgoff_t index,
128 		struct page **pagep, enum sgp_type sgp)
129 {
130 	return shmem_getpage_gfp(inode, index, pagep, sgp,
131 		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
132 }
133 
134 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
135 {
136 	return sb->s_fs_info;
137 }
138 
139 /*
140  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
141  * for shared memory and for shared anonymous (/dev/zero) mappings
142  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
143  * consistent with the pre-accounting of private mappings ...
144  */
145 static inline int shmem_acct_size(unsigned long flags, loff_t size)
146 {
147 	return (flags & VM_NORESERVE) ?
148 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
149 }
150 
151 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
152 {
153 	if (!(flags & VM_NORESERVE))
154 		vm_unacct_memory(VM_ACCT(size));
155 }
156 
157 static inline int shmem_reacct_size(unsigned long flags,
158 		loff_t oldsize, loff_t newsize)
159 {
160 	if (!(flags & VM_NORESERVE)) {
161 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
162 			return security_vm_enough_memory_mm(current->mm,
163 					VM_ACCT(newsize) - VM_ACCT(oldsize));
164 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
165 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
166 	}
167 	return 0;
168 }
169 
170 /*
171  * ... whereas tmpfs objects are accounted incrementally as
172  * pages are allocated, in order to allow large sparse files.
173  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
174  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
175  */
176 static inline int shmem_acct_block(unsigned long flags, long pages)
177 {
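	/* VM_NORESERVE means the size was not pre-accounted: charge block by block instead */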
178 	if (!(flags & VM_NORESERVE))
179 		return 0;
180 
181 	return security_vm_enough_memory_mm(current->mm,
182 			pages * VM_ACCT(PAGE_SIZE));
183 }
184 
185 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
186 {
187 	if (flags & VM_NORESERVE)
188 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
189 }
190 
191 static const struct super_operations shmem_ops;
192 static const struct address_space_operations shmem_aops;
193 static const struct file_operations shmem_file_operations;
194 static const struct inode_operations shmem_inode_operations;
195 static const struct inode_operations shmem_dir_inode_operations;
196 static const struct inode_operations shmem_special_inode_operations;
197 static const struct vm_operations_struct shmem_vm_ops;
198 static struct file_system_type shmem_fs_type;
199 
200 bool vma_is_shmem(struct vm_area_struct *vma)
201 {
202 	return vma->vm_ops == &shmem_vm_ops;
203 }
204 
205 static LIST_HEAD(shmem_swaplist);
206 static DEFINE_MUTEX(shmem_swaplist_mutex);
207 
208 static int shmem_reserve_inode(struct super_block *sb)
209 {
210 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
211 	if (sbinfo->max_inodes) {
212 		spin_lock(&sbinfo->stat_lock);
213 		if (!sbinfo->free_inodes) {
214 			spin_unlock(&sbinfo->stat_lock);
215 			return -ENOSPC;
216 		}
217 		sbinfo->free_inodes--;
218 		spin_unlock(&sbinfo->stat_lock);
219 	}
220 	return 0;
221 }
222 
223 static void shmem_free_inode(struct super_block *sb)
224 {
225 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
226 	if (sbinfo->max_inodes) {
227 		spin_lock(&sbinfo->stat_lock);
228 		sbinfo->free_inodes++;
229 		spin_unlock(&sbinfo->stat_lock);
230 	}
231 }
232 
233 /**
234  * shmem_recalc_inode - recalculate the block usage of an inode
235  * @inode: inode to recalc
236  *
237  * We have to calculate the free blocks since the mm can drop
238  * undirtied hole pages behind our back.
239  *
240  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
241  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
242  *
243  * It has to be called with the spinlock held.
244  */
245 static void shmem_recalc_inode(struct inode *inode)
246 {
247 	struct shmem_inode_info *info = SHMEM_I(inode);
248 	long freed;
249 
250 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
251 	if (freed > 0) {
252 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
253 		if (sbinfo->max_blocks)
254 			percpu_counter_add(&sbinfo->used_blocks, -freed);
255 		info->alloced -= freed;
256 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
257 		shmem_unacct_blocks(info->flags, freed);
258 	}
259 }
260 
261 bool shmem_charge(struct inode *inode, long pages)
262 {
263 	struct shmem_inode_info *info = SHMEM_I(inode);
264 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
265 	unsigned long flags;
266 
267 	if (shmem_acct_block(info->flags, pages))
268 		return false;
269 	spin_lock_irqsave(&info->lock, flags);
270 	info->alloced += pages;
271 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
272 	shmem_recalc_inode(inode);
273 	spin_unlock_irqrestore(&info->lock, flags);
274 	inode->i_mapping->nrpages += pages;
275 
276 	if (!sbinfo->max_blocks)
277 		return true;
278 	if (percpu_counter_compare(&sbinfo->used_blocks,
279 				sbinfo->max_blocks - pages) > 0) {
280 		inode->i_mapping->nrpages -= pages;
281 		spin_lock_irqsave(&info->lock, flags);
282 		info->alloced -= pages;
283 		shmem_recalc_inode(inode);
284 		spin_unlock_irqrestore(&info->lock, flags);
285 		shmem_unacct_blocks(info->flags, pages);
286 		return false;
287 	}
288 	percpu_counter_add(&sbinfo->used_blocks, pages);
289 	return true;
290 }
291 
292 void shmem_uncharge(struct inode *inode, long pages)
293 {
294 	struct shmem_inode_info *info = SHMEM_I(inode);
295 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
296 	unsigned long flags;
297 
298 	spin_lock_irqsave(&info->lock, flags);
299 	info->alloced -= pages;
300 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
301 	shmem_recalc_inode(inode);
302 	spin_unlock_irqrestore(&info->lock, flags);
303 
304 	if (sbinfo->max_blocks)
305 		percpu_counter_sub(&sbinfo->used_blocks, pages);
306 	shmem_unacct_blocks(info->flags, pages);
307 }
308 
309 /*
310  * Replace item expected in radix tree by a new item, while holding tree lock.
311  */
312 static int shmem_radix_tree_replace(struct address_space *mapping,
313 			pgoff_t index, void *expected, void *replacement)
314 {
315 	struct radix_tree_node *node;
316 	void **pslot;
317 	void *item;
318 
319 	VM_BUG_ON(!expected);
320 	VM_BUG_ON(!replacement);
321 	item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
322 	if (!item)
323 		return -ENOENT;
324 	if (item != expected)
325 		return -ENOENT;
326 	__radix_tree_replace(&mapping->page_tree, node, pslot,
327 			     replacement, NULL, NULL);
328 	return 0;
329 }
330 
331 /*
332  * Sometimes, before we decide whether to proceed or to fail, we must check
333  * that an entry was not already brought back from swap by a racing thread.
334  *
335  * Checking page is not enough: by the time a SwapCache page is locked, it
336  * might be reused, and again be SwapCache, using the same swap as before.
337  */
338 static bool shmem_confirm_swap(struct address_space *mapping,
339 			       pgoff_t index, swp_entry_t swap)
340 {
341 	void *item;
342 
343 	rcu_read_lock();
344 	item = radix_tree_lookup(&mapping->page_tree, index);
345 	rcu_read_unlock();
346 	return item == swp_to_radix_entry(swap);
347 }
348 
349 /*
350  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
351  *
352  * SHMEM_HUGE_NEVER:
353  *	disables huge pages for the mount;
354  * SHMEM_HUGE_ALWAYS:
355  *	enables huge pages for the mount;
356  * SHMEM_HUGE_WITHIN_SIZE:
357  *	only allocate huge pages if the page will be fully within i_size,
358  *	also respect fadvise()/madvise() hints;
359  * SHMEM_HUGE_ADVISE:
360  *	only allocate huge pages if requested with fadvise()/madvise();
361  */
362 
363 #define SHMEM_HUGE_NEVER	0
364 #define SHMEM_HUGE_ALWAYS	1
365 #define SHMEM_HUGE_WITHIN_SIZE	2
366 #define SHMEM_HUGE_ADVISE	3
367 
368 /*
369  * Special values.
370  * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
371  *
372  * SHMEM_HUGE_DENY:
373  *	disables huge on shm_mnt and all mounts, for emergency use;
374  * SHMEM_HUGE_FORCE:
375  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
376  *
377  */
378 #define SHMEM_HUGE_DENY		(-1)
379 #define SHMEM_HUGE_FORCE	(-2)
380 
381 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
382 /* ifdef here to avoid bloating shmem.o when not necessary */
383 
384 int shmem_huge __read_mostly;
385 
386 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
387 static int shmem_parse_huge(const char *str)
388 {
389 	if (!strcmp(str, "never"))
390 		return SHMEM_HUGE_NEVER;
391 	if (!strcmp(str, "always"))
392 		return SHMEM_HUGE_ALWAYS;
393 	if (!strcmp(str, "within_size"))
394 		return SHMEM_HUGE_WITHIN_SIZE;
395 	if (!strcmp(str, "advise"))
396 		return SHMEM_HUGE_ADVISE;
397 	if (!strcmp(str, "deny"))
398 		return SHMEM_HUGE_DENY;
399 	if (!strcmp(str, "force"))
400 		return SHMEM_HUGE_FORCE;
401 	return -EINVAL;
402 }
403 
404 static const char *shmem_format_huge(int huge)
405 {
406 	switch (huge) {
407 	case SHMEM_HUGE_NEVER:
408 		return "never";
409 	case SHMEM_HUGE_ALWAYS:
410 		return "always";
411 	case SHMEM_HUGE_WITHIN_SIZE:
412 		return "within_size";
413 	case SHMEM_HUGE_ADVISE:
414 		return "advise";
415 	case SHMEM_HUGE_DENY:
416 		return "deny";
417 	case SHMEM_HUGE_FORCE:
418 		return "force";
419 	default:
420 		VM_BUG_ON(1);
421 		return "bad_val";
422 	}
423 }
424 #endif
425 
426 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
427 		struct shrink_control *sc, unsigned long nr_to_split)
428 {
429 	LIST_HEAD(list), *pos, *next;
430 	LIST_HEAD(to_remove);
431 	struct inode *inode;
432 	struct shmem_inode_info *info;
433 	struct page *page;
434 	unsigned long batch = sc ? sc->nr_to_scan : 128;
435 	int removed = 0, split = 0;
436 
437 	if (list_empty(&sbinfo->shrinklist))
438 		return SHRINK_STOP;
439 
440 	spin_lock(&sbinfo->shrinklist_lock);
441 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
442 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
443 
444 		/* pin the inode */
445 		inode = igrab(&info->vfs_inode);
446 
447 		/* inode is about to be evicted */
448 		if (!inode) {
449 			list_del_init(&info->shrinklist);
450 			removed++;
451 			goto next;
452 		}
453 
454 		/* Check if there's anything to gain */
455 		if (round_up(inode->i_size, PAGE_SIZE) ==
456 				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
457 			list_move(&info->shrinklist, &to_remove);
458 			removed++;
459 			goto next;
460 		}
461 
462 		list_move(&info->shrinklist, &list);
463 next:
464 		if (!--batch)
465 			break;
466 	}
467 	spin_unlock(&sbinfo->shrinklist_lock);
468 
469 	list_for_each_safe(pos, next, &to_remove) {
470 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
471 		inode = &info->vfs_inode;
472 		list_del_init(&info->shrinklist);
473 		iput(inode);
474 	}
475 
476 	list_for_each_safe(pos, next, &list) {
477 		int ret;
478 
479 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
480 		inode = &info->vfs_inode;
481 
482 		if (nr_to_split && split >= nr_to_split) {
483 			iput(inode);
484 			continue;
485 		}
486 
487 		page = find_lock_page(inode->i_mapping,
488 				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
489 		if (!page)
490 			goto drop;
491 
492 		if (!PageTransHuge(page)) {
493 			unlock_page(page);
494 			put_page(page);
495 			goto drop;
496 		}
497 
498 		ret = split_huge_page(page);
499 		unlock_page(page);
500 		put_page(page);
501 
502 		if (ret) {
503 			/* split failed: leave it on the list */
504 			iput(inode);
505 			continue;
506 		}
507 
508 		split++;
509 drop:
510 		list_del_init(&info->shrinklist);
511 		removed++;
512 		iput(inode);
513 	}
514 
515 	spin_lock(&sbinfo->shrinklist_lock);
516 	list_splice_tail(&list, &sbinfo->shrinklist);
517 	sbinfo->shrinklist_len -= removed;
518 	spin_unlock(&sbinfo->shrinklist_lock);
519 
520 	return split;
521 }
522 
523 static long shmem_unused_huge_scan(struct super_block *sb,
524 		struct shrink_control *sc)
525 {
526 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
527 
528 	if (!READ_ONCE(sbinfo->shrinklist_len))
529 		return SHRINK_STOP;
530 
531 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
532 }
533 
534 static long shmem_unused_huge_count(struct super_block *sb,
535 		struct shrink_control *sc)
536 {
537 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
538 	return READ_ONCE(sbinfo->shrinklist_len);
539 }
540 #else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
541 
542 #define shmem_huge SHMEM_HUGE_DENY
543 
544 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
545 		struct shrink_control *sc, unsigned long nr_to_split)
546 {
547 	return 0;
548 }
549 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
550 
551 /*
552  * Like add_to_page_cache_locked, but error if expected item has gone.
553  */
554 static int shmem_add_to_page_cache(struct page *page,
555 				   struct address_space *mapping,
556 				   pgoff_t index, void *expected)
557 {
558 	int error, nr = hpage_nr_pages(page);
559 
560 	VM_BUG_ON_PAGE(PageTail(page), page);
561 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
562 	VM_BUG_ON_PAGE(!PageLocked(page), page);
563 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
564 	VM_BUG_ON(expected && PageTransHuge(page));
565 
566 	page_ref_add(page, nr);
567 	page->mapping = mapping;
568 	page->index = index;
569 
570 	spin_lock_irq(&mapping->tree_lock);
571 	if (PageTransHuge(page)) {
572 		void __rcu **results;
573 		pgoff_t idx;
574 		int i;
575 
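		/*
		 * A huge page occupies HPAGE_PMD_NR consecutive slots: fail
		 * with -EEXIST if anything already sits in that range, else
		 * insert every subpage individually.
		 */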
576 		error = 0;
577 		if (radix_tree_gang_lookup_slot(&mapping->page_tree,
578 					&results, &idx, index, 1) &&
579 				idx < index + HPAGE_PMD_NR) {
580 			error = -EEXIST;
581 		}
582 
583 		if (!error) {
584 			for (i = 0; i < HPAGE_PMD_NR; i++) {
585 				error = radix_tree_insert(&mapping->page_tree,
586 						index + i, page + i);
587 				VM_BUG_ON(error);
588 			}
589 			count_vm_event(THP_FILE_ALLOC);
590 		}
591 	} else if (!expected) {
592 		error = radix_tree_insert(&mapping->page_tree, index, page);
593 	} else {
594 		error = shmem_radix_tree_replace(mapping, index, expected,
595 								 page);
596 	}
597 
598 	if (!error) {
599 		mapping->nrpages += nr;
600 		if (PageTransHuge(page))
601 			__inc_node_page_state(page, NR_SHMEM_THPS);
602 		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
603 		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
604 		spin_unlock_irq(&mapping->tree_lock);
605 	} else {
606 		page->mapping = NULL;
607 		spin_unlock_irq(&mapping->tree_lock);
608 		page_ref_sub(page, nr);
609 	}
610 	return error;
611 }
612 
613 /*
614  * Like delete_from_page_cache, but substitutes swap for page.
615  */
616 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
617 {
618 	struct address_space *mapping = page->mapping;
619 	int error;
620 
621 	VM_BUG_ON_PAGE(PageCompound(page), page);
622 
623 	spin_lock_irq(&mapping->tree_lock);
624 	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
625 	page->mapping = NULL;
626 	mapping->nrpages--;
627 	__dec_node_page_state(page, NR_FILE_PAGES);
628 	__dec_node_page_state(page, NR_SHMEM);
629 	spin_unlock_irq(&mapping->tree_lock);
630 	put_page(page);
631 	BUG_ON(error);
632 }
633 
634 /*
635  * Remove swap entry from radix tree, free the swap and its page cache.
636  */
637 static int shmem_free_swap(struct address_space *mapping,
638 			   pgoff_t index, void *radswap)
639 {
640 	void *old;
641 
642 	spin_lock_irq(&mapping->tree_lock);
643 	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
644 	spin_unlock_irq(&mapping->tree_lock);
645 	if (old != radswap)
646 		return -ENOENT;
647 	free_swap_and_cache(radix_to_swp_entry(radswap));
648 	return 0;
649 }
650 
651 /*
652  * Determine (in bytes) how many of the shmem object's pages mapped by the
653  * given offsets are swapped out.
654  *
655  * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
656  * as long as the inode doesn't go away and racy results are not a problem.
657  */
658 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
659 						pgoff_t start, pgoff_t end)
660 {
661 	struct radix_tree_iter iter;
662 	void **slot;
663 	struct page *page;
664 	unsigned long swapped = 0;
665 
666 	rcu_read_lock();
667 
668 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
669 		if (iter.index >= end)
670 			break;
671 
672 		page = radix_tree_deref_slot(slot);
673 
674 		if (radix_tree_deref_retry(page)) {
675 			slot = radix_tree_iter_retry(&iter);
676 			continue;
677 		}
678 
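		/*
		 * Swapped-out pages are kept in the radix tree as exceptional
		 * (swap) entries, so counting those entries counts the pages
		 * currently on swap.
		 */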
679 		if (radix_tree_exceptional_entry(page))
680 			swapped++;
681 
682 		if (need_resched()) {
683 			slot = radix_tree_iter_resume(slot, &iter);
684 			cond_resched_rcu();
685 		}
686 	}
687 
688 	rcu_read_unlock();
689 
690 	return swapped << PAGE_SHIFT;
691 }
692 
693 /*
694  * Determine (in bytes) how many of the shmem object's pages mapped by the
695  * given vma is swapped out.
696  *
697  * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
698  * as long as the inode doesn't go away and racy results are not a problem.
699  */
700 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
701 {
702 	struct inode *inode = file_inode(vma->vm_file);
703 	struct shmem_inode_info *info = SHMEM_I(inode);
704 	struct address_space *mapping = inode->i_mapping;
705 	unsigned long swapped;
706 
707 	/* Be careful as we don't hold info->lock */
708 	swapped = READ_ONCE(info->swapped);
709 
710 	/*
711 	 * The easier cases are when the shmem object has nothing in swap, or
712 	 * the vma maps it whole. Then we can simply use the stats that we
713 	 * already track.
714 	 */
715 	if (!swapped)
716 		return 0;
717 
718 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
719 		return swapped << PAGE_SHIFT;
720 
721 	/* Here comes the more involved part */
722 	return shmem_partial_swap_usage(mapping,
723 			linear_page_index(vma, vma->vm_start),
724 			linear_page_index(vma, vma->vm_end));
725 }
726 
727 /*
728  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
729  */
730 void shmem_unlock_mapping(struct address_space *mapping)
731 {
732 	struct pagevec pvec;
733 	pgoff_t indices[PAGEVEC_SIZE];
734 	pgoff_t index = 0;
735 
736 	pagevec_init(&pvec, 0);
737 	/*
738 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
739 	 */
740 	while (!mapping_unevictable(mapping)) {
741 		/*
742 		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
743 		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
744 		 */
745 		pvec.nr = find_get_entries(mapping, index,
746 					   PAGEVEC_SIZE, pvec.pages, indices);
747 		if (!pvec.nr)
748 			break;
749 		index = indices[pvec.nr - 1] + 1;
750 		pagevec_remove_exceptionals(&pvec);
751 		check_move_unevictable_pages(pvec.pages, pvec.nr);
752 		pagevec_release(&pvec);
753 		cond_resched();
754 	}
755 }
756 
757 /*
758  * Remove range of pages and swap entries from radix tree, and free them.
759  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
760  */
761 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
762 								 bool unfalloc)
763 {
764 	struct address_space *mapping = inode->i_mapping;
765 	struct shmem_inode_info *info = SHMEM_I(inode);
766 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
767 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
768 	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
769 	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
770 	struct pagevec pvec;
771 	pgoff_t indices[PAGEVEC_SIZE];
772 	long nr_swaps_freed = 0;
773 	pgoff_t index;
774 	int i;
775 
776 	if (lend == -1)
777 		end = -1;	/* unsigned, so actually very big */
778 
779 	pagevec_init(&pvec, 0);
780 	index = start;
781 	while (index < end) {
782 		pvec.nr = find_get_entries(mapping, index,
783 			min(end - index, (pgoff_t)PAGEVEC_SIZE),
784 			pvec.pages, indices);
785 		if (!pvec.nr)
786 			break;
787 		for (i = 0; i < pagevec_count(&pvec); i++) {
788 			struct page *page = pvec.pages[i];
789 
790 			index = indices[i];
791 			if (index >= end)
792 				break;
793 
794 			if (radix_tree_exceptional_entry(page)) {
795 				if (unfalloc)
796 					continue;
797 				nr_swaps_freed += !shmem_free_swap(mapping,
798 								index, page);
799 				continue;
800 			}
801 
802 			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
803 
804 			if (!trylock_page(page))
805 				continue;
806 
807 			if (PageTransTail(page)) {
808 				/* Middle of THP: zero out the page */
809 				clear_highpage(page);
810 				unlock_page(page);
811 				continue;
812 			} else if (PageTransHuge(page)) {
813 				if (index == round_down(end, HPAGE_PMD_NR)) {
814 					/*
815 					 * Range ends in the middle of THP:
816 					 * zero out the page
817 					 */
818 					clear_highpage(page);
819 					unlock_page(page);
820 					continue;
821 				}
822 				index += HPAGE_PMD_NR - 1;
823 				i += HPAGE_PMD_NR - 1;
824 			}
825 
826 			if (!unfalloc || !PageUptodate(page)) {
827 				VM_BUG_ON_PAGE(PageTail(page), page);
828 				if (page_mapping(page) == mapping) {
829 					VM_BUG_ON_PAGE(PageWriteback(page), page);
830 					truncate_inode_page(mapping, page);
831 				}
832 			}
833 			unlock_page(page);
834 		}
835 		pagevec_remove_exceptionals(&pvec);
836 		pagevec_release(&pvec);
837 		cond_resched();
838 		index++;
839 	}
840 
841 	if (partial_start) {
842 		struct page *page = NULL;
843 		shmem_getpage(inode, start - 1, &page, SGP_READ);
844 		if (page) {
845 			unsigned int top = PAGE_SIZE;
846 			if (start > end) {
847 				top = partial_end;
848 				partial_end = 0;
849 			}
850 			zero_user_segment(page, partial_start, top);
851 			set_page_dirty(page);
852 			unlock_page(page);
853 			put_page(page);
854 		}
855 	}
856 	if (partial_end) {
857 		struct page *page = NULL;
858 		shmem_getpage(inode, end, &page, SGP_READ);
859 		if (page) {
860 			zero_user_segment(page, 0, partial_end);
861 			set_page_dirty(page);
862 			unlock_page(page);
863 			put_page(page);
864 		}
865 	}
866 	if (start >= end)
867 		return;
868 
869 	index = start;
870 	while (index < end) {
871 		cond_resched();
872 
873 		pvec.nr = find_get_entries(mapping, index,
874 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
875 				pvec.pages, indices);
876 		if (!pvec.nr) {
877 			/* If all gone or hole-punch or unfalloc, we're done */
878 			if (index == start || end != -1)
879 				break;
880 			/* But if truncating, restart to make sure all gone */
881 			index = start;
882 			continue;
883 		}
884 		for (i = 0; i < pagevec_count(&pvec); i++) {
885 			struct page *page = pvec.pages[i];
886 
887 			index = indices[i];
888 			if (index >= end)
889 				break;
890 
891 			if (radix_tree_exceptional_entry(page)) {
892 				if (unfalloc)
893 					continue;
894 				if (shmem_free_swap(mapping, index, page)) {
895 					/* Swap was replaced by page: retry */
896 					index--;
897 					break;
898 				}
899 				nr_swaps_freed++;
900 				continue;
901 			}
902 
903 			lock_page(page);
904 
905 			if (PageTransTail(page)) {
906 				/* Middle of THP: zero out the page */
907 				clear_highpage(page);
908 				unlock_page(page);
909 				/*
910 				 * Partial THP truncate due to 'start' falling in the middle
911 				 * of the THP: no need to look at these pages
912 				 * again on !pvec.nr restart.
913 				 */
914 				if (index != round_down(end, HPAGE_PMD_NR))
915 					start++;
916 				continue;
917 			} else if (PageTransHuge(page)) {
918 				if (index == round_down(end, HPAGE_PMD_NR)) {
919 					/*
920 					 * Range ends in the middle of THP:
921 					 * zero out the page
922 					 */
923 					clear_highpage(page);
924 					unlock_page(page);
925 					continue;
926 				}
927 				index += HPAGE_PMD_NR - 1;
928 				i += HPAGE_PMD_NR - 1;
929 			}
930 
931 			if (!unfalloc || !PageUptodate(page)) {
932 				VM_BUG_ON_PAGE(PageTail(page), page);
933 				if (page_mapping(page) == mapping) {
934 					VM_BUG_ON_PAGE(PageWriteback(page), page);
935 					truncate_inode_page(mapping, page);
936 				} else {
937 					/* Page was replaced by swap: retry */
938 					unlock_page(page);
939 					index--;
940 					break;
941 				}
942 			}
943 			unlock_page(page);
944 		}
945 		pagevec_remove_exceptionals(&pvec);
946 		pagevec_release(&pvec);
947 		index++;
948 	}
949 
950 	spin_lock_irq(&info->lock);
951 	info->swapped -= nr_swaps_freed;
952 	shmem_recalc_inode(inode);
953 	spin_unlock_irq(&info->lock);
954 }
955 
956 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
957 {
958 	shmem_undo_range(inode, lstart, lend, false);
959 	inode->i_ctime = inode->i_mtime = current_time(inode);
960 }
961 EXPORT_SYMBOL_GPL(shmem_truncate_range);
962 
963 static int shmem_getattr(const struct path *path, struct kstat *stat,
964 			 u32 request_mask, unsigned int query_flags)
965 {
966 	struct inode *inode = path->dentry->d_inode;
967 	struct shmem_inode_info *info = SHMEM_I(inode);
968 
969 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
970 		spin_lock_irq(&info->lock);
971 		shmem_recalc_inode(inode);
972 		spin_unlock_irq(&info->lock);
973 	}
974 	generic_fillattr(inode, stat);
975 	return 0;
976 }
977 
978 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
979 {
980 	struct inode *inode = d_inode(dentry);
981 	struct shmem_inode_info *info = SHMEM_I(inode);
982 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
983 	int error;
984 
985 	error = setattr_prepare(dentry, attr);
986 	if (error)
987 		return error;
988 
989 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
990 		loff_t oldsize = inode->i_size;
991 		loff_t newsize = attr->ia_size;
992 
993 		/* protected by i_mutex */
994 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
995 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
996 			return -EPERM;
997 
998 		if (newsize != oldsize) {
999 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
1000 					oldsize, newsize);
1001 			if (error)
1002 				return error;
1003 			i_size_write(inode, newsize);
1004 			inode->i_ctime = inode->i_mtime = current_time(inode);
1005 		}
1006 		if (newsize <= oldsize) {
1007 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1008 			if (oldsize > holebegin)
1009 				unmap_mapping_range(inode->i_mapping,
1010 							holebegin, 0, 1);
1011 			if (info->alloced)
1012 				shmem_truncate_range(inode,
1013 							newsize, (loff_t)-1);
1014 			/* unmap again to remove racily COWed private pages */
1015 			if (oldsize > holebegin)
1016 				unmap_mapping_range(inode->i_mapping,
1017 							holebegin, 0, 1);
1018 
1019 			/*
1020 			 * Part of the huge page can be beyond i_size: subject
1021 			 * to shrink under memory pressure.
1022 			 */
1023 			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1024 				spin_lock(&sbinfo->shrinklist_lock);
1025 				if (list_empty(&info->shrinklist)) {
1026 					list_add_tail(&info->shrinklist,
1027 							&sbinfo->shrinklist);
1028 					sbinfo->shrinklist_len++;
1029 				}
1030 				spin_unlock(&sbinfo->shrinklist_lock);
1031 			}
1032 		}
1033 	}
1034 
1035 	setattr_copy(inode, attr);
1036 	if (attr->ia_valid & ATTR_MODE)
1037 		error = posix_acl_chmod(inode, inode->i_mode);
1038 	return error;
1039 }
1040 
1041 static void shmem_evict_inode(struct inode *inode)
1042 {
1043 	struct shmem_inode_info *info = SHMEM_I(inode);
1044 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1045 
1046 	if (inode->i_mapping->a_ops == &shmem_aops) {
1047 		shmem_unacct_size(info->flags, inode->i_size);
1048 		inode->i_size = 0;
1049 		shmem_truncate_range(inode, 0, (loff_t)-1);
1050 		if (!list_empty(&info->shrinklist)) {
1051 			spin_lock(&sbinfo->shrinklist_lock);
1052 			if (!list_empty(&info->shrinklist)) {
1053 				list_del_init(&info->shrinklist);
1054 				sbinfo->shrinklist_len--;
1055 			}
1056 			spin_unlock(&sbinfo->shrinklist_lock);
1057 		}
1058 		if (!list_empty(&info->swaplist)) {
1059 			mutex_lock(&shmem_swaplist_mutex);
1060 			list_del_init(&info->swaplist);
1061 			mutex_unlock(&shmem_swaplist_mutex);
1062 		}
1063 	}
1064 
1065 	simple_xattrs_free(&info->xattrs);
1066 	WARN_ON(inode->i_blocks);
1067 	shmem_free_inode(inode->i_sb);
1068 	clear_inode(inode);
1069 }
1070 
1071 static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
1072 {
1073 	struct radix_tree_iter iter;
1074 	void **slot;
1075 	unsigned long found = -1;
1076 	unsigned int checked = 0;
1077 
1078 	rcu_read_lock();
1079 	radix_tree_for_each_slot(slot, root, &iter, 0) {
1080 		if (*slot == item) {
1081 			found = iter.index;
1082 			break;
1083 		}
1084 		checked++;
1085 		if ((checked % 4096) != 0)
1086 			continue;
1087 		slot = radix_tree_iter_resume(slot, &iter);
1088 		cond_resched_rcu();
1089 	}
1090 
1091 	rcu_read_unlock();
1092 	return found;
1093 }
1094 
1095 /*
1096  * If swap found in inode, free it and move page from swapcache to filecache.
1097  */
1098 static int shmem_unuse_inode(struct shmem_inode_info *info,
1099 			     swp_entry_t swap, struct page **pagep)
1100 {
1101 	struct address_space *mapping = info->vfs_inode.i_mapping;
1102 	void *radswap;
1103 	pgoff_t index;
1104 	gfp_t gfp;
1105 	int error = 0;
1106 
1107 	radswap = swp_to_radix_entry(swap);
1108 	index = find_swap_entry(&mapping->page_tree, radswap);
1109 	if (index == -1)
1110 		return -EAGAIN;	/* tell shmem_unuse we found nothing */
1111 
1112 	/*
1113 	 * Move _head_ to start search for next from here.
1114 	 * But be careful: shmem_evict_inode checks list_empty without taking
1115 	 * mutex, and there's an instant in list_move_tail when info->swaplist
1116 	 * would appear empty, if it were the only one on shmem_swaplist.
1117 	 */
1118 	if (shmem_swaplist.next != &info->swaplist)
1119 		list_move_tail(&shmem_swaplist, &info->swaplist);
1120 
1121 	gfp = mapping_gfp_mask(mapping);
1122 	if (shmem_should_replace_page(*pagep, gfp)) {
1123 		mutex_unlock(&shmem_swaplist_mutex);
1124 		error = shmem_replace_page(pagep, gfp, info, index);
1125 		mutex_lock(&shmem_swaplist_mutex);
1126 		/*
1127 		 * We needed to drop mutex to make that restrictive page
1128 		 * allocation, but the inode might have been freed while we
1129 		 * dropped it: although a racing shmem_evict_inode() cannot
1130 		 * complete without emptying the radix_tree, our page lock
1131 		 * on this swapcache page is not enough to prevent that -
1132 		 * free_swap_and_cache() of our swap entry will only
1133 		 * trylock_page(), removing swap from radix_tree whatever.
1134 		 *
1135 		 * We must not proceed to shmem_add_to_page_cache() if the
1136 		 * inode has been freed, but of course we cannot rely on
1137 		 * inode or mapping or info to check that.  However, we can
1138 		 * safely check if our swap entry is still in use (and here
1139 		 * it can't have got reused for another page): if it's still
1140 		 * in use, then the inode cannot have been freed yet, and we
1141 		 * can safely proceed (if it's no longer in use, that tells
1142 		 * nothing about the inode, but we don't need to unuse swap).
1143 		 */
1144 		if (!page_swapcount(*pagep))
1145 			error = -ENOENT;
1146 	}
1147 
1148 	/*
1149 	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
1150 	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
1151 	 * beneath us (pagelock doesn't help until the page is in pagecache).
1152 	 */
1153 	if (!error)
1154 		error = shmem_add_to_page_cache(*pagep, mapping, index,
1155 						radswap);
1156 	if (error != -ENOMEM) {
1157 		/*
1158 		 * Truncation and eviction use free_swap_and_cache(), which
1159 		 * only does trylock page: if we raced, best clean up here.
1160 		 */
1161 		delete_from_swap_cache(*pagep);
1162 		set_page_dirty(*pagep);
1163 		if (!error) {
1164 			spin_lock_irq(&info->lock);
1165 			info->swapped--;
1166 			spin_unlock_irq(&info->lock);
1167 			swap_free(swap);
1168 		}
1169 	}
1170 	return error;
1171 }
1172 
1173 /*
1174  * Search through swapped inodes to find and replace swap by page.
1175  */
1176 int shmem_unuse(swp_entry_t swap, struct page *page)
1177 {
1178 	struct list_head *this, *next;
1179 	struct shmem_inode_info *info;
1180 	struct mem_cgroup *memcg;
1181 	int error = 0;
1182 
1183 	/*
1184 	 * There's a faint possibility that swap page was replaced before
1185 	 * caller locked it: caller will come back later with the right page.
1186 	 */
1187 	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
1188 		goto out;
1189 
1190 	/*
1191 	 * Charge page using GFP_KERNEL while we can wait, before taking
1192 	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
1193 	 * Charged back to the user (not to caller) when swap account is used.
1194 	 */
1195 	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
1196 			false);
1197 	if (error)
1198 		goto out;
1199 	/* No radix_tree_preload: swap entry keeps a place for page in tree */
1200 	error = -EAGAIN;
1201 
1202 	mutex_lock(&shmem_swaplist_mutex);
1203 	list_for_each_safe(this, next, &shmem_swaplist) {
1204 		info = list_entry(this, struct shmem_inode_info, swaplist);
1205 		if (info->swapped)
1206 			error = shmem_unuse_inode(info, swap, &page);
1207 		else
1208 			list_del_init(&info->swaplist);
1209 		cond_resched();
1210 		if (error != -EAGAIN)
1211 			break;
1212 		/* found nothing in this: move on to search the next */
1213 	}
1214 	mutex_unlock(&shmem_swaplist_mutex);
1215 
1216 	if (error) {
1217 		if (error != -ENOMEM)
1218 			error = 0;
1219 		mem_cgroup_cancel_charge(page, memcg, false);
1220 	} else
1221 		mem_cgroup_commit_charge(page, memcg, true, false);
1222 out:
1223 	unlock_page(page);
1224 	put_page(page);
1225 	return error;
1226 }
1227 
1228 /*
1229  * Move the page from the page cache to the swap cache.
1230  */
1231 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1232 {
1233 	struct shmem_inode_info *info;
1234 	struct address_space *mapping;
1235 	struct inode *inode;
1236 	swp_entry_t swap;
1237 	pgoff_t index;
1238 
1239 	VM_BUG_ON_PAGE(PageCompound(page), page);
1240 	BUG_ON(!PageLocked(page));
1241 	mapping = page->mapping;
1242 	index = page->index;
1243 	inode = mapping->host;
1244 	info = SHMEM_I(inode);
1245 	if (info->flags & VM_LOCKED)
1246 		goto redirty;
1247 	if (!total_swap_pages)
1248 		goto redirty;
1249 
1250 	/*
1251 	 * Our capabilities prevent regular writeback or sync from ever calling
1252 	 * shmem_writepage; but a stacking filesystem might use ->writepage of
1253 	 * its underlying filesystem, in which case tmpfs should write out to
1254 	 * swap only in response to memory pressure, and not for the writeback
1255 	 * threads or sync.
1256 	 */
1257 	if (!wbc->for_reclaim) {
1258 		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
1259 		goto redirty;
1260 	}
1261 
1262 	/*
1263 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1264 	 * value into swapfile.c, the only way we can correctly account for a
1265 	 * fallocated page arriving here is now to initialize it and write it.
1266 	 *
1267 	 * That's okay for a page already fallocated earlier, but if we have
1268 	 * not yet completed the fallocation, then (a) we want to keep track
1269 	 * of this page in case we have to undo it, and (b) it may not be a
1270 	 * good idea to continue anyway, once we're pushing into swap.  So
1271 	 * reactivate the page, and let shmem_fallocate() quit when too many.
1272 	 */
1273 	if (!PageUptodate(page)) {
1274 		if (inode->i_private) {
1275 			struct shmem_falloc *shmem_falloc;
1276 			spin_lock(&inode->i_lock);
1277 			shmem_falloc = inode->i_private;
1278 			if (shmem_falloc &&
1279 			    !shmem_falloc->waitq &&
1280 			    index >= shmem_falloc->start &&
1281 			    index < shmem_falloc->next)
1282 				shmem_falloc->nr_unswapped++;
1283 			else
1284 				shmem_falloc = NULL;
1285 			spin_unlock(&inode->i_lock);
1286 			if (shmem_falloc)
1287 				goto redirty;
1288 		}
1289 		clear_highpage(page);
1290 		flush_dcache_page(page);
1291 		SetPageUptodate(page);
1292 	}
1293 
1294 	swap = get_swap_page();
1295 	if (!swap.val)
1296 		goto redirty;
1297 
1298 	if (mem_cgroup_try_charge_swap(page, swap))
1299 		goto free_swap;
1300 
1301 	/*
1302 	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1303 	 * if it's not already there.  Do it now before the page is
1304 	 * moved to swap cache, when its pagelock no longer protects
1305 	 * the inode from eviction.  But don't unlock the mutex until
1306 	 * we've incremented swapped, because shmem_unuse_inode() will
1307 	 * prune a !swapped inode from the swaplist under this mutex.
1308 	 */
1309 	mutex_lock(&shmem_swaplist_mutex);
1310 	if (list_empty(&info->swaplist))
1311 		list_add_tail(&info->swaplist, &shmem_swaplist);
1312 
1313 	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1314 		spin_lock_irq(&info->lock);
1315 		shmem_recalc_inode(inode);
1316 		info->swapped++;
1317 		spin_unlock_irq(&info->lock);
1318 
1319 		swap_shmem_alloc(swap);
1320 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1321 
1322 		mutex_unlock(&shmem_swaplist_mutex);
1323 		BUG_ON(page_mapped(page));
1324 		swap_writepage(page, wbc);
1325 		return 0;
1326 	}
1327 
1328 	mutex_unlock(&shmem_swaplist_mutex);
1329 free_swap:
1330 	swapcache_free(swap);
1331 redirty:
1332 	set_page_dirty(page);
1333 	if (wbc->for_reclaim)
1334 		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
1335 	unlock_page(page);
1336 	return 0;
1337 }
1338 
1339 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1340 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1341 {
1342 	char buffer[64];
1343 
1344 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1345 		return;		/* show nothing */
1346 
1347 	mpol_to_str(buffer, sizeof(buffer), mpol);
1348 
1349 	seq_printf(seq, ",mpol=%s", buffer);
1350 }
1351 
1352 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1353 {
1354 	struct mempolicy *mpol = NULL;
1355 	if (sbinfo->mpol) {
1356 		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1357 		mpol = sbinfo->mpol;
1358 		mpol_get(mpol);
1359 		spin_unlock(&sbinfo->stat_lock);
1360 	}
1361 	return mpol;
1362 }
1363 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1364 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1365 {
1366 }
1367 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1368 {
1369 	return NULL;
1370 }
1371 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1372 #ifndef CONFIG_NUMA
1373 #define vm_policy vm_private_data
1374 #endif
1375 
1376 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1377 		struct shmem_inode_info *info, pgoff_t index)
1378 {
1379 	/* Create a pseudo vma that just contains the policy */
1380 	vma->vm_start = 0;
1381 	/* Bias interleave by inode number to distribute better across nodes */
1382 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
1383 	vma->vm_ops = NULL;
1384 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1385 }
1386 
1387 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1388 {
1389 	/* Drop reference taken by mpol_shared_policy_lookup() */
1390 	mpol_cond_put(vma->vm_policy);
1391 }
1392 
1393 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1394 			struct shmem_inode_info *info, pgoff_t index)
1395 {
1396 	struct vm_area_struct pvma;
1397 	struct page *page;
1398 
1399 	shmem_pseudo_vma_init(&pvma, info, index);
1400 	page = swapin_readahead(swap, gfp, &pvma, 0);
1401 	shmem_pseudo_vma_destroy(&pvma);
1402 
1403 	return page;
1404 }
1405 
1406 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1407 		struct shmem_inode_info *info, pgoff_t index)
1408 {
1409 	struct vm_area_struct pvma;
1410 	struct inode *inode = &info->vfs_inode;
1411 	struct address_space *mapping = inode->i_mapping;
1412 	pgoff_t idx, hindex;
1413 	void __rcu **results;
1414 	struct page *page;
1415 
1416 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1417 		return NULL;
1418 
1419 	hindex = round_down(index, HPAGE_PMD_NR);
1420 	rcu_read_lock();
1421 	if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx,
1422 				hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
1423 		rcu_read_unlock();
1424 		return NULL;
1425 	}
1426 	rcu_read_unlock();
1427 
1428 	shmem_pseudo_vma_init(&pvma, info, hindex);
1429 	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1430 			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1431 	shmem_pseudo_vma_destroy(&pvma);
1432 	if (page)
1433 		prep_transhuge_page(page);
1434 	return page;
1435 }
1436 
1437 static struct page *shmem_alloc_page(gfp_t gfp,
1438 			struct shmem_inode_info *info, pgoff_t index)
1439 {
1440 	struct vm_area_struct pvma;
1441 	struct page *page;
1442 
1443 	shmem_pseudo_vma_init(&pvma, info, index);
1444 	page = alloc_page_vma(gfp, &pvma, 0);
1445 	shmem_pseudo_vma_destroy(&pvma);
1446 
1447 	return page;
1448 }
1449 
1450 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1451 		struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
1452 		pgoff_t index, bool huge)
1453 {
1454 	struct page *page;
1455 	int nr;
1456 	int err = -ENOSPC;
1457 
1458 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1459 		huge = false;
1460 	nr = huge ? HPAGE_PMD_NR : 1;
1461 
1462 	if (shmem_acct_block(info->flags, nr))
1463 		goto failed;
1464 	if (sbinfo->max_blocks) {
1465 		if (percpu_counter_compare(&sbinfo->used_blocks,
1466 					sbinfo->max_blocks - nr) > 0)
1467 			goto unacct;
1468 		percpu_counter_add(&sbinfo->used_blocks, nr);
1469 	}
1470 
1471 	if (huge)
1472 		page = shmem_alloc_hugepage(gfp, info, index);
1473 	else
1474 		page = shmem_alloc_page(gfp, info, index);
1475 	if (page) {
1476 		__SetPageLocked(page);
1477 		__SetPageSwapBacked(page);
1478 		return page;
1479 	}
1480 
1481 	err = -ENOMEM;
1482 	if (sbinfo->max_blocks)
1483 		percpu_counter_add(&sbinfo->used_blocks, -nr);
1484 unacct:
1485 	shmem_unacct_blocks(info->flags, nr);
1486 failed:
1487 	return ERR_PTR(err);
1488 }
1489 
1490 /*
1491  * When a page is moved from swapcache to shmem filecache (either by the
1492  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1493  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1494  * ignorance of the mapping it belongs to.  If that mapping has special
1495  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1496  * we may need to copy to a suitable page before moving to filecache.
1497  *
1498  * In a future release, this may well be extended to respect cpuset and
1499  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1500  * but for now it is a simple matter of zone.
1501  */
1502 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1503 {
1504 	return page_zonenum(page) > gfp_zone(gfp);
1505 }
1506 
1507 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1508 				struct shmem_inode_info *info, pgoff_t index)
1509 {
1510 	struct page *oldpage, *newpage;
1511 	struct address_space *swap_mapping;
1512 	pgoff_t swap_index;
1513 	int error;
1514 
1515 	oldpage = *pagep;
1516 	swap_index = page_private(oldpage);
1517 	swap_mapping = page_mapping(oldpage);
1518 
1519 	/*
1520 	 * We have arrived here because our zones are constrained, so don't
1521 	 * limit chance of success by further cpuset and node constraints.
1522 	 */
1523 	gfp &= ~GFP_CONSTRAINT_MASK;
1524 	newpage = shmem_alloc_page(gfp, info, index);
1525 	if (!newpage)
1526 		return -ENOMEM;
1527 
1528 	get_page(newpage);
1529 	copy_highpage(newpage, oldpage);
1530 	flush_dcache_page(newpage);
1531 
1532 	__SetPageLocked(newpage);
1533 	__SetPageSwapBacked(newpage);
1534 	SetPageUptodate(newpage);
1535 	set_page_private(newpage, swap_index);
1536 	SetPageSwapCache(newpage);
1537 
1538 	/*
1539 	 * Our caller will very soon move newpage out of swapcache, but it's
1540 	 * a nice clean interface for us to replace oldpage by newpage there.
1541 	 */
1542 	spin_lock_irq(&swap_mapping->tree_lock);
1543 	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1544 								   newpage);
1545 	if (!error) {
1546 		__inc_node_page_state(newpage, NR_FILE_PAGES);
1547 		__dec_node_page_state(oldpage, NR_FILE_PAGES);
1548 	}
1549 	spin_unlock_irq(&swap_mapping->tree_lock);
1550 
1551 	if (unlikely(error)) {
1552 		/*
1553 		 * Is this possible?  I think not, now that our callers check
1554 		 * both PageSwapCache and page_private after getting page lock;
1555 		 * but be defensive.  Reverse old to newpage for clear and free.
1556 		 */
1557 		oldpage = newpage;
1558 	} else {
1559 		mem_cgroup_migrate(oldpage, newpage);
1560 		lru_cache_add_anon(newpage);
1561 		*pagep = newpage;
1562 	}
1563 
1564 	ClearPageSwapCache(oldpage);
1565 	set_page_private(oldpage, 0);
1566 
1567 	unlock_page(oldpage);
1568 	put_page(oldpage);
1569 	put_page(oldpage);
1570 	return error;
1571 }
1572 
1573 /*
1574  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1575  *
1576  * If we allocate a new one we do not mark it dirty. That's up to the
1577  * vm. If we swap it in we mark it dirty since we also free the swap
1578  * entry, as a page cannot live in both the swap and page cache.
1579  *
1580  * fault_mm and fault_type are only supplied by shmem_fault:
1581  * otherwise they are NULL.
1582  */
1583 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1584 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1585 	struct vm_area_struct *vma, struct vm_fault *vmf, int *fault_type)
1586 {
1587 	struct address_space *mapping = inode->i_mapping;
1588 	struct shmem_inode_info *info = SHMEM_I(inode);
1589 	struct shmem_sb_info *sbinfo;
1590 	struct mm_struct *charge_mm;
1591 	struct mem_cgroup *memcg;
1592 	struct page *page;
1593 	swp_entry_t swap;
1594 	enum sgp_type sgp_huge = sgp;
1595 	pgoff_t hindex = index;
1596 	int error;
1597 	int once = 0;
1598 	int alloced = 0;
1599 
1600 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1601 		return -EFBIG;
1602 	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1603 		sgp = SGP_CACHE;
1604 repeat:
1605 	swap.val = 0;
1606 	page = find_lock_entry(mapping, index);
1607 	if (radix_tree_exceptional_entry(page)) {
1608 		swap = radix_to_swp_entry(page);
1609 		page = NULL;
1610 	}
1611 
1612 	if (sgp <= SGP_CACHE &&
1613 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1614 		error = -EINVAL;
1615 		goto unlock;
1616 	}
1617 
1618 	if (page && sgp == SGP_WRITE)
1619 		mark_page_accessed(page);
1620 
1621 	/* fallocated page? */
1622 	if (page && !PageUptodate(page)) {
1623 		if (sgp != SGP_READ)
1624 			goto clear;
1625 		unlock_page(page);
1626 		put_page(page);
1627 		page = NULL;
1628 	}
1629 	if (page || (sgp == SGP_READ && !swap.val)) {
1630 		*pagep = page;
1631 		return 0;
1632 	}
1633 
1634 	/*
1635 	 * Fast cache lookup did not find it:
1636 	 * bring it back from swap or allocate.
1637 	 */
1638 	sbinfo = SHMEM_SB(inode->i_sb);
1639 	charge_mm = vma ? vma->vm_mm : current->mm;
1640 
1641 	if (swap.val) {
1642 		/* Look it up and read it in.. */
1643 		page = lookup_swap_cache(swap);
1644 		if (!page) {
1645 			/* Or update major stats only when swapin succeeds?? */
1646 			if (fault_type) {
1647 				*fault_type |= VM_FAULT_MAJOR;
1648 				count_vm_event(PGMAJFAULT);
1649 				mem_cgroup_count_vm_event(charge_mm,
1650 							  PGMAJFAULT);
1651 			}
1652 			/* Here we actually start the io */
1653 			page = shmem_swapin(swap, gfp, info, index);
1654 			if (!page) {
1655 				error = -ENOMEM;
1656 				goto failed;
1657 			}
1658 		}
1659 
1660 		/* We have to do this with page locked to prevent races */
1661 		lock_page(page);
1662 		if (!PageSwapCache(page) || page_private(page) != swap.val ||
1663 		    !shmem_confirm_swap(mapping, index, swap)) {
1664 			error = -EEXIST;	/* try again */
1665 			goto unlock;
1666 		}
1667 		if (!PageUptodate(page)) {
1668 			error = -EIO;
1669 			goto failed;
1670 		}
1671 		wait_on_page_writeback(page);
1672 
1673 		if (shmem_should_replace_page(page, gfp)) {
1674 			error = shmem_replace_page(&page, gfp, info, index);
1675 			if (error)
1676 				goto failed;
1677 		}
1678 
1679 		error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1680 				false);
1681 		if (!error) {
1682 			error = shmem_add_to_page_cache(page, mapping, index,
1683 						swp_to_radix_entry(swap));
1684 			/*
1685 			 * We already confirmed swap under page lock, and make
1686 			 * no memory allocation here, so usually no possibility
1687 			 * of error; but free_swap_and_cache() only trylocks a
1688 			 * page, so it is just possible that the entry has been
1689 			 * truncated or holepunched since swap was confirmed.
1690 			 * shmem_undo_range() will have done some of the
1691 			 * unaccounting, now delete_from_swap_cache() will do
1692 			 * the rest.
1693 			 * Reset swap.val? No, leave it so "failed" goes back to
1694 			 * "repeat": reading a hole and writing should succeed.
1695 			 */
1696 			if (error) {
1697 				mem_cgroup_cancel_charge(page, memcg, false);
1698 				delete_from_swap_cache(page);
1699 			}
1700 		}
1701 		if (error)
1702 			goto failed;
1703 
1704 		mem_cgroup_commit_charge(page, memcg, true, false);
1705 
1706 		spin_lock_irq(&info->lock);
1707 		info->swapped--;
1708 		shmem_recalc_inode(inode);
1709 		spin_unlock_irq(&info->lock);
1710 
1711 		if (sgp == SGP_WRITE)
1712 			mark_page_accessed(page);
1713 
1714 		delete_from_swap_cache(page);
1715 		set_page_dirty(page);
1716 		swap_free(swap);
1717 
1718 	} else {
1719 		if (vma && userfaultfd_missing(vma)) {
1720 			*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1721 			return 0;
1722 		}
1723 
1724 		/* shmem_symlink() */
1725 		if (mapping->a_ops != &shmem_aops)
1726 			goto alloc_nohuge;
1727 		if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1728 			goto alloc_nohuge;
1729 		if (shmem_huge == SHMEM_HUGE_FORCE)
1730 			goto alloc_huge;
1731 		switch (sbinfo->huge) {
1732 			loff_t i_size;
1733 			pgoff_t off;
1734 		case SHMEM_HUGE_NEVER:
1735 			goto alloc_nohuge;
1736 		case SHMEM_HUGE_WITHIN_SIZE:
1737 			off = round_up(index, HPAGE_PMD_NR);
1738 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
1739 			if (i_size >= HPAGE_PMD_SIZE &&
1740 					i_size >> PAGE_SHIFT >= off)
1741 				goto alloc_huge;
1742 			/* fallthrough */
1743 		case SHMEM_HUGE_ADVISE:
1744 			if (sgp_huge == SGP_HUGE)
1745 				goto alloc_huge;
1746 			/* TODO: implement fadvise() hints */
1747 			goto alloc_nohuge;
1748 		}
1749 
1750 alloc_huge:
1751 		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1752 				index, true);
1753 		if (IS_ERR(page)) {
1754 alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1755 					index, false);
1756 		}
1757 		if (IS_ERR(page)) {
1758 			int retry = 5;
1759 			error = PTR_ERR(page);
1760 			page = NULL;
1761 			if (error != -ENOSPC)
1762 				goto failed;
1763 			/*
1764 			 * Try to reclaim some space by splitting a huge page
1765 			 * beyond i_size on the filesystem.
1766 			 */
1767 			while (retry--) {
1768 				int ret;
1769 				ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1770 				if (ret == SHRINK_STOP)
1771 					break;
1772 				if (ret)
1773 					goto alloc_nohuge;
1774 			}
1775 			goto failed;
1776 		}
1777 
1778 		if (PageTransHuge(page))
1779 			hindex = round_down(index, HPAGE_PMD_NR);
1780 		else
1781 			hindex = index;
1782 
1783 		if (sgp == SGP_WRITE)
1784 			__SetPageReferenced(page);
1785 
1786 		error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1787 				PageTransHuge(page));
1788 		if (error)
1789 			goto unacct;
1790 		error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
1791 				compound_order(page));
1792 		if (!error) {
1793 			error = shmem_add_to_page_cache(page, mapping, hindex,
1794 							NULL);
1795 			radix_tree_preload_end();
1796 		}
1797 		if (error) {
1798 			mem_cgroup_cancel_charge(page, memcg,
1799 					PageTransHuge(page));
1800 			goto unacct;
1801 		}
1802 		mem_cgroup_commit_charge(page, memcg, false,
1803 				PageTransHuge(page));
1804 		lru_cache_add_anon(page);
1805 
1806 		spin_lock_irq(&info->lock);
1807 		info->alloced += 1 << compound_order(page);
1808 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1809 		shmem_recalc_inode(inode);
1810 		spin_unlock_irq(&info->lock);
1811 		alloced = true;
1812 
1813 		if (PageTransHuge(page) &&
1814 				DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1815 				hindex + HPAGE_PMD_NR - 1) {
1816 			/*
1817 			 * Part of the huge page is beyond i_size: subject
1818 			 * to shrink under memory pressure.
1819 			 */
1820 			spin_lock(&sbinfo->shrinklist_lock);
1821 			if (list_empty(&info->shrinklist)) {
1822 				list_add_tail(&info->shrinklist,
1823 						&sbinfo->shrinklist);
1824 				sbinfo->shrinklist_len++;
1825 			}
1826 			spin_unlock(&sbinfo->shrinklist_lock);
1827 		}
1828 
1829 		/*
1830 		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1831 		 */
1832 		if (sgp == SGP_FALLOC)
1833 			sgp = SGP_WRITE;
1834 clear:
1835 		/*
1836 		 * Let SGP_WRITE caller clear ends if write does not fill page;
1837 		 * but SGP_FALLOC on a page fallocated earlier must initialize
1838 		 * it now, lest undo on failure cancel our earlier guarantee.
1839 		 */
1840 		if (sgp != SGP_WRITE && !PageUptodate(page)) {
1841 			struct page *head = compound_head(page);
1842 			int i;
1843 
1844 			for (i = 0; i < (1 << compound_order(head)); i++) {
1845 				clear_highpage(head + i);
1846 				flush_dcache_page(head + i);
1847 			}
1848 			SetPageUptodate(head);
1849 		}
1850 	}
1851 
1852 	/* Perhaps the file has been truncated since we checked */
1853 	if (sgp <= SGP_CACHE &&
1854 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1855 		if (alloced) {
1856 			ClearPageDirty(page);
1857 			delete_from_page_cache(page);
1858 			spin_lock_irq(&info->lock);
1859 			shmem_recalc_inode(inode);
1860 			spin_unlock_irq(&info->lock);
1861 		}
1862 		error = -EINVAL;
1863 		goto unlock;
1864 	}
1865 	*pagep = page + index - hindex;
1866 	return 0;
1867 
1868 	/*
1869 	 * Error recovery.
1870 	 */
1871 unacct:
1872 	if (sbinfo->max_blocks)
1873 		percpu_counter_sub(&sbinfo->used_blocks,
1874 				1 << compound_order(page));
1875 	shmem_unacct_blocks(info->flags, 1 << compound_order(page));
1876 
1877 	if (PageTransHuge(page)) {
1878 		unlock_page(page);
1879 		put_page(page);
1880 		goto alloc_nohuge;
1881 	}
1882 failed:
1883 	if (swap.val && !shmem_confirm_swap(mapping, index, swap))
1884 		error = -EEXIST;
1885 unlock:
1886 	if (page) {
1887 		unlock_page(page);
1888 		put_page(page);
1889 	}
1890 	if (error == -ENOSPC && !once++) {
1891 		spin_lock_irq(&info->lock);
1892 		shmem_recalc_inode(inode);
1893 		spin_unlock_irq(&info->lock);
1894 		goto repeat;
1895 	}
1896 	if (error == -EEXIST)	/* from above or from radix_tree_insert */
1897 		goto repeat;
1898 	return error;
1899 }
1900 
1901 /*
1902  * This is like autoremove_wake_function, but it removes the wait queue
1903  * entry unconditionally - even if something else had already woken the
1904  * target.
1905  */
1906 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1907 {
1908 	int ret = default_wake_function(wait, mode, sync, key);
1909 	list_del_init(&wait->entry);
1910 	return ret;
1911 }
1912 
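/*
 * Fault in a page for a shmem/tmpfs mapping: back off (or retry) while a
 * hole-punch covers the faulting offset, then let shmem_getpage_gfp() find
 * or allocate the page, honouring the vma's VM_HUGEPAGE/VM_NOHUGEPAGE hints.
 */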
1913 static int shmem_fault(struct vm_fault *vmf)
1914 {
1915 	struct vm_area_struct *vma = vmf->vma;
1916 	struct inode *inode = file_inode(vma->vm_file);
1917 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1918 	enum sgp_type sgp;
1919 	int error;
1920 	int ret = VM_FAULT_LOCKED;
1921 
1922 	/*
1923 	 * Trinity finds that probing a hole which tmpfs is punching can
1924 	 * prevent the hole-punch from ever completing: which in turn
1925 	 * locks writers out with its hold on i_mutex.  So refrain from
1926 	 * faulting pages into the hole while it's being punched.  Although
1927 	 * shmem_undo_range() does remove the additions, it may be unable to
1928 	 * keep up, as each new page needs its own unmap_mapping_range() call,
1929 	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
1930 	 *
1931 	 * It does not matter if we sometimes reach this check just before the
1932 	 * hole-punch begins, so that one fault then races with the punch:
1933 	 * we just need to make racing faults a rare case.
1934 	 *
1935 	 * The implementation below would be much simpler if we just used a
1936 	 * standard mutex or completion: but we cannot take i_mutex in fault,
1937 	 * and bloating every shmem inode for this unlikely case would be sad.
1938 	 */
1939 	if (unlikely(inode->i_private)) {
1940 		struct shmem_falloc *shmem_falloc;
1941 
1942 		spin_lock(&inode->i_lock);
1943 		shmem_falloc = inode->i_private;
1944 		if (shmem_falloc &&
1945 		    shmem_falloc->waitq &&
1946 		    vmf->pgoff >= shmem_falloc->start &&
1947 		    vmf->pgoff < shmem_falloc->next) {
1948 			wait_queue_head_t *shmem_falloc_waitq;
1949 			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
1950 
1951 			ret = VM_FAULT_NOPAGE;
1952 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1953 			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1954 				/* It's polite to up mmap_sem if we can */
1955 				up_read(&vma->vm_mm->mmap_sem);
1956 				ret = VM_FAULT_RETRY;
1957 			}
1958 
1959 			shmem_falloc_waitq = shmem_falloc->waitq;
1960 			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1961 					TASK_UNINTERRUPTIBLE);
1962 			spin_unlock(&inode->i_lock);
1963 			schedule();
1964 
1965 			/*
1966 			 * shmem_falloc_waitq points into the shmem_fallocate()
1967 			 * stack of the hole-punching task: shmem_falloc_waitq
1968 			 * is usually invalid by the time we reach here, but
1969 			 * finish_wait() does not dereference it in that case;
1970 			 * though i_lock needed lest racing with wake_up_all().
1971 			 */
1972 			spin_lock(&inode->i_lock);
1973 			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1974 			spin_unlock(&inode->i_lock);
1975 			return ret;
1976 		}
1977 		spin_unlock(&inode->i_lock);
1978 	}
1979 
1980 	sgp = SGP_CACHE;
1981 	if (vma->vm_flags & VM_HUGEPAGE)
1982 		sgp = SGP_HUGE;
1983 	else if (vma->vm_flags & VM_NOHUGEPAGE)
1984 		sgp = SGP_NOHUGE;
1985 
1986 	error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
1987 				  gfp, vma, vmf, &ret);
1988 	if (error)
1989 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1990 	return ret;
1991 }
1992 
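/*
 * Pick an unmapped area for a shmem mapping.  Start from the mm's default
 * get_unmapped_area(); if transparent huge pagecache is enabled and huge
 * pages could be used, retry with an inflated length so the result can be
 * shifted to an offset within a huge page that matches the file offset.
 */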
1993 unsigned long shmem_get_unmapped_area(struct file *file,
1994 				      unsigned long uaddr, unsigned long len,
1995 				      unsigned long pgoff, unsigned long flags)
1996 {
1997 	unsigned long (*get_area)(struct file *,
1998 		unsigned long, unsigned long, unsigned long, unsigned long);
1999 	unsigned long addr;
2000 	unsigned long offset;
2001 	unsigned long inflated_len;
2002 	unsigned long inflated_addr;
2003 	unsigned long inflated_offset;
2004 
2005 	if (len > TASK_SIZE)
2006 		return -ENOMEM;
2007 
2008 	get_area = current->mm->get_unmapped_area;
2009 	addr = get_area(file, uaddr, len, pgoff, flags);
2010 
2011 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
2012 		return addr;
2013 	if (IS_ERR_VALUE(addr))
2014 		return addr;
2015 	if (addr & ~PAGE_MASK)
2016 		return addr;
2017 	if (addr > TASK_SIZE - len)
2018 		return addr;
2019 
2020 	if (shmem_huge == SHMEM_HUGE_DENY)
2021 		return addr;
2022 	if (len < HPAGE_PMD_SIZE)
2023 		return addr;
2024 	if (flags & MAP_FIXED)
2025 		return addr;
2026 	/*
2027 	 * Our priority is to support MAP_SHARED mapped hugely;
2028 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2029 	 * But if caller specified an address hint, respect that as before.
2030 	 */
2031 	if (uaddr)
2032 		return addr;
2033 
2034 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2035 		struct super_block *sb;
2036 
2037 		if (file) {
2038 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2039 			sb = file_inode(file)->i_sb;
2040 		} else {
2041 			/*
2042 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2043 			 * for "/dev/zero", to create a shared anonymous object.
2044 			 */
2045 			if (IS_ERR(shm_mnt))
2046 				return addr;
2047 			sb = shm_mnt->mnt_sb;
2048 		}
2049 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2050 			return addr;
2051 	}
2052 
2053 	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2054 	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2055 		return addr;
2056 	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2057 		return addr;
2058 
2059 	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2060 	if (inflated_len > TASK_SIZE)
2061 		return addr;
2062 	if (inflated_len < len)
2063 		return addr;
2064 
2065 	inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2066 	if (IS_ERR_VALUE(inflated_addr))
2067 		return addr;
2068 	if (inflated_addr & ~PAGE_MASK)
2069 		return addr;
2070 
2071 	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2072 	inflated_addr += offset - inflated_offset;
2073 	if (inflated_offset > offset)
2074 		inflated_addr += HPAGE_PMD_SIZE;
2075 
2076 	if (inflated_addr > TASK_SIZE - len)
2077 		return addr;
2078 	return inflated_addr;
2079 }
2080 
2081 #ifdef CONFIG_NUMA
2082 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2083 {
2084 	struct inode *inode = file_inode(vma->vm_file);
2085 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2086 }
2087 
2088 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2089 					  unsigned long addr)
2090 {
2091 	struct inode *inode = file_inode(vma->vm_file);
2092 	pgoff_t index;
2093 
2094 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2095 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2096 }
2097 #endif
2098 
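/*
 * SHM_LOCK/SHM_UNLOCK support (SysV shared memory): charge the segment
 * against the user's locked-memory limit via user_shm_lock() and mark the
 * mapping unevictable, so its pages are not pushed out to swap.
 */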
2099 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2100 {
2101 	struct inode *inode = file_inode(file);
2102 	struct shmem_inode_info *info = SHMEM_I(inode);
2103 	int retval = -ENOMEM;
2104 
2105 	spin_lock_irq(&info->lock);
2106 	if (lock && !(info->flags & VM_LOCKED)) {
2107 		if (!user_shm_lock(inode->i_size, user))
2108 			goto out_nomem;
2109 		info->flags |= VM_LOCKED;
2110 		mapping_set_unevictable(file->f_mapping);
2111 	}
2112 	if (!lock && (info->flags & VM_LOCKED) && user) {
2113 		user_shm_unlock(inode->i_size, user);
2114 		info->flags &= ~VM_LOCKED;
2115 		mapping_clear_unevictable(file->f_mapping);
2116 	}
2117 	retval = 0;
2118 
2119 out_nomem:
2120 	spin_unlock_irq(&info->lock);
2121 	return retval;
2122 }
2123 
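/*
 * mmap() of a tmpfs file: install shmem_vm_ops and, when transparent huge
 * pagecache is enabled, register the vma with khugepaged if it is large
 * enough to hold at least one PMD-sized page.
 */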
2124 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2125 {
2126 	file_accessed(file);
2127 	vma->vm_ops = &shmem_vm_ops;
2128 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2129 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2130 			(vma->vm_end & HPAGE_PMD_MASK)) {
2131 		khugepaged_enter(vma, vma->vm_flags);
2132 	}
2133 	return 0;
2134 }
2135 
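/*
 * Allocate and initialize a new shmem inode: reserve it against the
 * superblock's inode limit, then set up the operations appropriate to the
 * file type (regular file, directory, symlink or special node).
 */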
2136 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2137 				     umode_t mode, dev_t dev, unsigned long flags)
2138 {
2139 	struct inode *inode;
2140 	struct shmem_inode_info *info;
2141 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2142 
2143 	if (shmem_reserve_inode(sb))
2144 		return NULL;
2145 
2146 	inode = new_inode(sb);
2147 	if (inode) {
2148 		inode->i_ino = get_next_ino();
2149 		inode_init_owner(inode, dir, mode);
2150 		inode->i_blocks = 0;
2151 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2152 		inode->i_generation = get_seconds();
2153 		info = SHMEM_I(inode);
2154 		memset(info, 0, (char *)inode - (char *)info);
2155 		spin_lock_init(&info->lock);
2156 		info->seals = F_SEAL_SEAL;
2157 		info->flags = flags & VM_NORESERVE;
2158 		INIT_LIST_HEAD(&info->shrinklist);
2159 		INIT_LIST_HEAD(&info->swaplist);
2160 		simple_xattrs_init(&info->xattrs);
2161 		cache_no_acl(inode);
2162 
2163 		switch (mode & S_IFMT) {
2164 		default:
2165 			inode->i_op = &shmem_special_inode_operations;
2166 			init_special_inode(inode, mode, dev);
2167 			break;
2168 		case S_IFREG:
2169 			inode->i_mapping->a_ops = &shmem_aops;
2170 			inode->i_op = &shmem_inode_operations;
2171 			inode->i_fop = &shmem_file_operations;
2172 			mpol_shared_policy_init(&info->policy,
2173 						 shmem_get_sbmpol(sbinfo));
2174 			break;
2175 		case S_IFDIR:
2176 			inc_nlink(inode);
2177 			/* Some things misbehave if size == 0 on a directory */
2178 			inode->i_size = 2 * BOGO_DIRENT_SIZE;
2179 			inode->i_op = &shmem_dir_inode_operations;
2180 			inode->i_fop = &simple_dir_operations;
2181 			break;
2182 		case S_IFLNK:
2183 			/*
2184 			 * Must not load anything in the rbtree,
2185 			 * mpol_free_shared_policy will not be called.
2186 			 */
2187 			mpol_shared_policy_init(&info->policy, NULL);
2188 			break;
2189 		}
2190 	} else
2191 		shmem_free_inode(sb);
2192 	return inode;
2193 }
2194 
2195 bool shmem_mapping(struct address_space *mapping)
2196 {
2197 	return mapping->a_ops == &shmem_aops;
2198 }
2199 
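/*
 * UFFDIO_COPY support for shmem: allocate a page, copy the userspace data
 * into it, add it to the page cache and map it at dst_addr.  If the copy
 * faults (src_addr not resident), hand the page back via *pagep and return
 * -EFAULT so the caller can retry the copy_from_user() without mmap_sem.
 */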
2200 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2201 			   pmd_t *dst_pmd,
2202 			   struct vm_area_struct *dst_vma,
2203 			   unsigned long dst_addr,
2204 			   unsigned long src_addr,
2205 			   struct page **pagep)
2206 {
2207 	struct inode *inode = file_inode(dst_vma->vm_file);
2208 	struct shmem_inode_info *info = SHMEM_I(inode);
2209 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2210 	struct address_space *mapping = inode->i_mapping;
2211 	gfp_t gfp = mapping_gfp_mask(mapping);
2212 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2213 	struct mem_cgroup *memcg;
2214 	spinlock_t *ptl;
2215 	void *page_kaddr;
2216 	struct page *page;
2217 	pte_t _dst_pte, *dst_pte;
2218 	int ret;
2219 
2220 	ret = -ENOMEM;
2221 	if (shmem_acct_block(info->flags, 1))
2222 		goto out;
2223 	if (sbinfo->max_blocks) {
2224 		if (percpu_counter_compare(&sbinfo->used_blocks,
2225 					   sbinfo->max_blocks) >= 0)
2226 			goto out_unacct_blocks;
2227 		percpu_counter_inc(&sbinfo->used_blocks);
2228 	}
2229 
2230 	if (!*pagep) {
2231 		page = shmem_alloc_page(gfp, info, pgoff);
2232 		if (!page)
2233 			goto out_dec_used_blocks;
2234 
2235 		page_kaddr = kmap_atomic(page);
2236 		ret = copy_from_user(page_kaddr, (const void __user *)src_addr,
2237 				     PAGE_SIZE);
2238 		kunmap_atomic(page_kaddr);
2239 
2240 		/* fallback to copy_from_user outside mmap_sem */
2241 		if (unlikely(ret)) {
2242 			*pagep = page;
2243 			if (sbinfo->max_blocks)
2244 				percpu_counter_add(&sbinfo->used_blocks, -1);
2245 			shmem_unacct_blocks(info->flags, 1);
2246 			/* don't free the page */
2247 			return -EFAULT;
2248 		}
2249 	} else {
2250 		page = *pagep;
2251 		*pagep = NULL;
2252 	}
2253 
2254 	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2255 	__SetPageLocked(page);
2256 	__SetPageSwapBacked(page);
2257 	__SetPageUptodate(page);
2258 
2259 	ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false);
2260 	if (ret)
2261 		goto out_release;
2262 
2263 	ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
2264 	if (!ret) {
2265 		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
2266 		radix_tree_preload_end();
2267 	}
2268 	if (ret)
2269 		goto out_release_uncharge;
2270 
2271 	mem_cgroup_commit_charge(page, memcg, false, false);
2272 
2273 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2274 	if (dst_vma->vm_flags & VM_WRITE)
2275 		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2276 
2277 	ret = -EEXIST;
2278 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2279 	if (!pte_none(*dst_pte))
2280 		goto out_release_uncharge_unlock;
2281 
2282 	lru_cache_add_anon(page);
2283 
2284 	spin_lock(&info->lock);
2285 	info->alloced++;
2286 	inode->i_blocks += BLOCKS_PER_PAGE;
2287 	shmem_recalc_inode(inode);
2288 	spin_unlock(&info->lock);
2289 
2290 	inc_mm_counter(dst_mm, mm_counter_file(page));
2291 	page_add_file_rmap(page, false);
2292 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2293 
2294 	/* No need to invalidate - it was non-present before */
2295 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
2296 	unlock_page(page);
2297 	pte_unmap_unlock(dst_pte, ptl);
2298 	ret = 0;
2299 out:
2300 	return ret;
2301 out_release_uncharge_unlock:
2302 	pte_unmap_unlock(dst_pte, ptl);
2303 out_release_uncharge:
2304 	mem_cgroup_cancel_charge(page, memcg, false);
2305 out_release:
2306 	unlock_page(page);
2307 	put_page(page);
2308 out_dec_used_blocks:
2309 	if (sbinfo->max_blocks)
2310 		percpu_counter_add(&sbinfo->used_blocks, -1);
2311 out_unacct_blocks:
2312 	shmem_unacct_blocks(info->flags, 1);
2313 	goto out;
2314 }
2315 
2316 #ifdef CONFIG_TMPFS
2317 static const struct inode_operations shmem_symlink_inode_operations;
2318 static const struct inode_operations shmem_short_symlink_operations;
2319 
2320 #ifdef CONFIG_TMPFS_XATTR
2321 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2322 #else
2323 #define shmem_initxattrs NULL
2324 #endif
2325 
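/*
 * ->write_begin for tmpfs: refuse writes forbidden by F_SEAL_WRITE or
 * F_SEAL_GROW, then pin the target page with SGP_WRITE; ->write_end below
 * extends i_size, zeroes any part of a not-yet-uptodate page that the write
 * did not cover, and marks the page dirty.
 */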
2326 static int
2327 shmem_write_begin(struct file *file, struct address_space *mapping,
2328 			loff_t pos, unsigned len, unsigned flags,
2329 			struct page **pagep, void **fsdata)
2330 {
2331 	struct inode *inode = mapping->host;
2332 	struct shmem_inode_info *info = SHMEM_I(inode);
2333 	pgoff_t index = pos >> PAGE_SHIFT;
2334 
2335 	/* i_mutex is held by caller */
2336 	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
2337 		if (info->seals & F_SEAL_WRITE)
2338 			return -EPERM;
2339 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2340 			return -EPERM;
2341 	}
2342 
2343 	return shmem_getpage(inode, index, pagep, SGP_WRITE);
2344 }
2345 
2346 static int
2347 shmem_write_end(struct file *file, struct address_space *mapping,
2348 			loff_t pos, unsigned len, unsigned copied,
2349 			struct page *page, void *fsdata)
2350 {
2351 	struct inode *inode = mapping->host;
2352 
2353 	if (pos + copied > inode->i_size)
2354 		i_size_write(inode, pos + copied);
2355 
2356 	if (!PageUptodate(page)) {
2357 		struct page *head = compound_head(page);
2358 		if (PageTransCompound(page)) {
2359 			int i;
2360 
2361 			for (i = 0; i < HPAGE_PMD_NR; i++) {
2362 				if (head + i == page)
2363 					continue;
2364 				clear_highpage(head + i);
2365 				flush_dcache_page(head + i);
2366 			}
2367 		}
2368 		if (copied < PAGE_SIZE) {
2369 			unsigned from = pos & (PAGE_SIZE - 1);
2370 			zero_user_segments(page, 0, from,
2371 					from + copied, PAGE_SIZE);
2372 		}
2373 		SetPageUptodate(head);
2374 	}
2375 	set_page_dirty(page);
2376 	unlock_page(page);
2377 	put_page(page);
2378 
2379 	return copied;
2380 }
2381 
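/*
 * read_iter for tmpfs: copy straight from the page cache, substituting the
 * zero page for holes.  Reads on behalf of a stacking filesystem use
 * SGP_CACHE instead, so holes are actually allocated and dirtied.
 */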
2382 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2383 {
2384 	struct file *file = iocb->ki_filp;
2385 	struct inode *inode = file_inode(file);
2386 	struct address_space *mapping = inode->i_mapping;
2387 	pgoff_t index;
2388 	unsigned long offset;
2389 	enum sgp_type sgp = SGP_READ;
2390 	int error = 0;
2391 	ssize_t retval = 0;
2392 	loff_t *ppos = &iocb->ki_pos;
2393 
2394 	/*
2395 	 * Might this read be for a stacking filesystem?  Then when reading
2396 	 * holes of a sparse file, we actually need to allocate those pages,
2397 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2398 	 */
2399 	if (!iter_is_iovec(to))
2400 		sgp = SGP_CACHE;
2401 
2402 	index = *ppos >> PAGE_SHIFT;
2403 	offset = *ppos & ~PAGE_MASK;
2404 
2405 	for (;;) {
2406 		struct page *page = NULL;
2407 		pgoff_t end_index;
2408 		unsigned long nr, ret;
2409 		loff_t i_size = i_size_read(inode);
2410 
2411 		end_index = i_size >> PAGE_SHIFT;
2412 		if (index > end_index)
2413 			break;
2414 		if (index == end_index) {
2415 			nr = i_size & ~PAGE_MASK;
2416 			if (nr <= offset)
2417 				break;
2418 		}
2419 
2420 		error = shmem_getpage(inode, index, &page, sgp);
2421 		if (error) {
2422 			if (error == -EINVAL)
2423 				error = 0;
2424 			break;
2425 		}
2426 		if (page) {
2427 			if (sgp == SGP_CACHE)
2428 				set_page_dirty(page);
2429 			unlock_page(page);
2430 		}
2431 
2432 		/*
2433 		 * We must evaluate after, since reads (unlike writes)
2434 		 * are called without i_mutex protection against truncate
2435 		 */
2436 		nr = PAGE_SIZE;
2437 		i_size = i_size_read(inode);
2438 		end_index = i_size >> PAGE_SHIFT;
2439 		if (index == end_index) {
2440 			nr = i_size & ~PAGE_MASK;
2441 			if (nr <= offset) {
2442 				if (page)
2443 					put_page(page);
2444 				break;
2445 			}
2446 		}
2447 		nr -= offset;
2448 
2449 		if (page) {
2450 			/*
2451 			 * If users can be writing to this page using arbitrary
2452 			 * virtual addresses, take care about potential aliasing
2453 			 * before reading the page on the kernel side.
2454 			 */
2455 			if (mapping_writably_mapped(mapping))
2456 				flush_dcache_page(page);
2457 			/*
2458 			 * Mark the page accessed if we read the beginning.
2459 			 */
2460 			if (!offset)
2461 				mark_page_accessed(page);
2462 		} else {
2463 			page = ZERO_PAGE(0);
2464 			get_page(page);
2465 		}
2466 
2467 		/*
2468 		 * Ok, we have the page, and it's up-to-date, so
2469 		 * now we can copy it to user space...
2470 		 */
2471 		ret = copy_page_to_iter(page, offset, nr, to);
2472 		retval += ret;
2473 		offset += ret;
2474 		index += offset >> PAGE_SHIFT;
2475 		offset &= ~PAGE_MASK;
2476 
2477 		put_page(page);
2478 		if (!iov_iter_count(to))
2479 			break;
2480 		if (ret < nr) {
2481 			error = -EFAULT;
2482 			break;
2483 		}
2484 		cond_resched();
2485 	}
2486 
2487 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2488 	file_accessed(file);
2489 	return retval ? retval : error;
2490 }
2491 
2492 /*
2493  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
2494  */
2495 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2496 				    pgoff_t index, pgoff_t end, int whence)
2497 {
2498 	struct page *page;
2499 	struct pagevec pvec;
2500 	pgoff_t indices[PAGEVEC_SIZE];
2501 	bool done = false;
2502 	int i;
2503 
2504 	pagevec_init(&pvec, 0);
2505 	pvec.nr = 1;		/* start small: we may be there already */
2506 	while (!done) {
2507 		pvec.nr = find_get_entries(mapping, index,
2508 					pvec.nr, pvec.pages, indices);
2509 		if (!pvec.nr) {
2510 			if (whence == SEEK_DATA)
2511 				index = end;
2512 			break;
2513 		}
2514 		for (i = 0; i < pvec.nr; i++, index++) {
2515 			if (index < indices[i]) {
2516 				if (whence == SEEK_HOLE) {
2517 					done = true;
2518 					break;
2519 				}
2520 				index = indices[i];
2521 			}
2522 			page = pvec.pages[i];
2523 			if (page && !radix_tree_exceptional_entry(page)) {
2524 				if (!PageUptodate(page))
2525 					page = NULL;
2526 			}
2527 			if (index >= end ||
2528 			    (page && whence == SEEK_DATA) ||
2529 			    (!page && whence == SEEK_HOLE)) {
2530 				done = true;
2531 				break;
2532 			}
2533 		}
2534 		pagevec_remove_exceptionals(&pvec);
2535 		pagevec_release(&pvec);
2536 		pvec.nr = PAGEVEC_SIZE;
2537 		cond_resched();
2538 	}
2539 	return index;
2540 }
2541 
2542 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2543 {
2544 	struct address_space *mapping = file->f_mapping;
2545 	struct inode *inode = mapping->host;
2546 	pgoff_t start, end;
2547 	loff_t new_offset;
2548 
2549 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
2550 		return generic_file_llseek_size(file, offset, whence,
2551 					MAX_LFS_FILESIZE, i_size_read(inode));
2552 	inode_lock(inode);
2553 	/* We're holding i_mutex so we can access i_size directly */
2554 
2555 	if (offset < 0)
2556 		offset = -EINVAL;
2557 	else if (offset >= inode->i_size)
2558 		offset = -ENXIO;
2559 	else {
2560 		start = offset >> PAGE_SHIFT;
2561 		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2562 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2563 		new_offset <<= PAGE_SHIFT;
2564 		if (new_offset > offset) {
2565 			if (new_offset < inode->i_size)
2566 				offset = new_offset;
2567 			else if (whence == SEEK_DATA)
2568 				offset = -ENXIO;
2569 			else
2570 				offset = inode->i_size;
2571 		}
2572 	}
2573 
2574 	if (offset >= 0)
2575 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2576 	inode_unlock(inode);
2577 	return offset;
2578 }
2579 
2580 /*
2581  * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
2582  * so reuse a tag which we firmly believe is never set or cleared on shmem.
2583  */
2584 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
2585 #define LAST_SCAN               4       /* about 150ms max */
2586 
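/*
 * Walk the mapping and tag every page whose refcount exceeds its mapcount
 * by more than one, i.e. pages with extra references (such as GUP pins),
 * so that shmem_wait_for_pins() can poll for those references being dropped.
 */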
2587 static void shmem_tag_pins(struct address_space *mapping)
2588 {
2589 	struct radix_tree_iter iter;
2590 	void **slot;
2591 	pgoff_t start;
2592 	struct page *page;
2593 
2594 	lru_add_drain();
2595 	start = 0;
2596 	rcu_read_lock();
2597 
2598 	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
2599 		page = radix_tree_deref_slot(slot);
2600 		if (!page || radix_tree_exception(page)) {
2601 			if (radix_tree_deref_retry(page)) {
2602 				slot = radix_tree_iter_retry(&iter);
2603 				continue;
2604 			}
2605 		} else if (page_count(page) - page_mapcount(page) > 1) {
2606 			spin_lock_irq(&mapping->tree_lock);
2607 			radix_tree_tag_set(&mapping->page_tree, iter.index,
2608 					   SHMEM_TAG_PINNED);
2609 			spin_unlock_irq(&mapping->tree_lock);
2610 		}
2611 
2612 		if (need_resched()) {
2613 			slot = radix_tree_iter_resume(slot, &iter);
2614 			cond_resched_rcu();
2615 		}
2616 	}
2617 	rcu_read_unlock();
2618 }
2619 
2620 /*
2621  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
2622  * via get_user_pages(), drivers might have some pending I/O without any active
2623  * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages
2624  * and see whether it has an elevated ref-count. If so, we tag them and wait for
2625  * them to be dropped.
2626  * The caller must guarantee that no new user will acquire writable references
2627  * to those pages to avoid races.
2628  */
2629 static int shmem_wait_for_pins(struct address_space *mapping)
2630 {
2631 	struct radix_tree_iter iter;
2632 	void **slot;
2633 	pgoff_t start;
2634 	struct page *page;
2635 	int error, scan;
2636 
2637 	shmem_tag_pins(mapping);
2638 
2639 	error = 0;
2640 	for (scan = 0; scan <= LAST_SCAN; scan++) {
2641 		if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
2642 			break;
2643 
2644 		if (!scan)
2645 			lru_add_drain_all();
2646 		else if (schedule_timeout_killable((HZ << scan) / 200))
2647 			scan = LAST_SCAN;
2648 
2649 		start = 0;
2650 		rcu_read_lock();
2651 		radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
2652 					   start, SHMEM_TAG_PINNED) {
2653 
2654 			page = radix_tree_deref_slot(slot);
2655 			if (radix_tree_exception(page)) {
2656 				if (radix_tree_deref_retry(page)) {
2657 					slot = radix_tree_iter_retry(&iter);
2658 					continue;
2659 				}
2660 
2661 				page = NULL;
2662 			}
2663 
2664 			if (page &&
2665 			    page_count(page) - page_mapcount(page) != 1) {
2666 				if (scan < LAST_SCAN)
2667 					goto continue_resched;
2668 
2669 				/*
2670 				 * On the last scan, we clean up all those tags
2671 				 * we inserted; but make a note that we still
2672 				 * found pages pinned.
2673 				 */
2674 				error = -EBUSY;
2675 			}
2676 
2677 			spin_lock_irq(&mapping->tree_lock);
2678 			radix_tree_tag_clear(&mapping->page_tree,
2679 					     iter.index, SHMEM_TAG_PINNED);
2680 			spin_unlock_irq(&mapping->tree_lock);
2681 continue_resched:
2682 			if (need_resched()) {
2683 				slot = radix_tree_iter_resume(slot, &iter);
2684 				cond_resched_rcu();
2685 			}
2686 		}
2687 		rcu_read_unlock();
2688 	}
2689 
2690 	return error;
2691 }
2692 
2693 #define F_ALL_SEALS (F_SEAL_SEAL | \
2694 		     F_SEAL_SHRINK | \
2695 		     F_SEAL_GROW | \
2696 		     F_SEAL_WRITE)
2697 
2698 int shmem_add_seals(struct file *file, unsigned int seals)
2699 {
2700 	struct inode *inode = file_inode(file);
2701 	struct shmem_inode_info *info = SHMEM_I(inode);
2702 	int error;
2703 
2704 	/*
2705 	 * SEALING
2706 	 * Sealing allows multiple parties to share a shmem-file but restrict
2707 	 * access to a specific subset of file operations. Seals can only be
2708 	 * added, but never removed. This way, mutually untrusted parties can
2709 	 * share common memory regions with a well-defined policy. A malicious
2710 	 * peer can thus never perform unwanted operations on a shared object.
2711 	 *
2712 	 * Seals are only supported on special shmem-files and always affect
2713 	 * the whole underlying inode. Once a seal is set, it may prevent some
2714 	 * kinds of access to the file. Currently, the following seals are
2715 	 * defined:
2716 	 *   SEAL_SEAL: Prevent further seals from being set on this file
2717 	 *   SEAL_SHRINK: Prevent the file from shrinking
2718 	 *   SEAL_GROW: Prevent the file from growing
2719 	 *   SEAL_WRITE: Prevent write access to the file
2720 	 *
2721 	 * As we don't require any trust relationship between two parties, we
2722 	 * must prevent seals from being removed. Therefore, sealing a file
2723 	 * only adds a given set of seals to the file, it never touches
2724 	 * existing seals. Furthermore, the "setting seals"-operation can be
2725 	 * sealed itself, which basically prevents any further seal from being
2726 	 * added.
2727 	 *
2728 	 * Semantics of sealing are only defined on volatile files. Only
2729 	 * anonymous shmem files support sealing. More importantly, seals are
2730 	 * never written to disk. Therefore, there's no plan to support it on
2731 	 * other file types.
2732 	 */
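
	/*
	 * Typical userspace usage (illustrative sketch, not from this file):
	 *
	 *	fd = memfd_create("name", MFD_ALLOW_SEALING);
	 *	ftruncate(fd, size);
	 *	... fill the memory via mmap() or write() ...
	 *	fcntl(fd, F_ADD_SEALS,
	 *	      F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);
	 */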
2733 
2734 	if (file->f_op != &shmem_file_operations)
2735 		return -EINVAL;
2736 	if (!(file->f_mode & FMODE_WRITE))
2737 		return -EPERM;
2738 	if (seals & ~(unsigned int)F_ALL_SEALS)
2739 		return -EINVAL;
2740 
2741 	inode_lock(inode);
2742 
2743 	if (info->seals & F_SEAL_SEAL) {
2744 		error = -EPERM;
2745 		goto unlock;
2746 	}
2747 
2748 	if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
2749 		error = mapping_deny_writable(file->f_mapping);
2750 		if (error)
2751 			goto unlock;
2752 
2753 		error = shmem_wait_for_pins(file->f_mapping);
2754 		if (error) {
2755 			mapping_allow_writable(file->f_mapping);
2756 			goto unlock;
2757 		}
2758 	}
2759 
2760 	info->seals |= seals;
2761 	error = 0;
2762 
2763 unlock:
2764 	inode_unlock(inode);
2765 	return error;
2766 }
2767 EXPORT_SYMBOL_GPL(shmem_add_seals);
2768 
2769 int shmem_get_seals(struct file *file)
2770 {
2771 	if (file->f_op != &shmem_file_operations)
2772 		return -EINVAL;
2773 
2774 	return SHMEM_I(file_inode(file))->seals;
2775 }
2776 EXPORT_SYMBOL_GPL(shmem_get_seals);
2777 
2778 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
2779 {
2780 	long error;
2781 
2782 	switch (cmd) {
2783 	case F_ADD_SEALS:
2784 		/* disallow upper 32bit */
2785 		if (arg > UINT_MAX)
2786 			return -EINVAL;
2787 
2788 		error = shmem_add_seals(file, arg);
2789 		break;
2790 	case F_GET_SEALS:
2791 		error = shmem_get_seals(file);
2792 		break;
2793 	default:
2794 		error = -EINVAL;
2795 		break;
2796 	}
2797 
2798 	return error;
2799 }
2800 
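/*
 * fallocate() for tmpfs: supports preallocation and FALLOC_FL_PUNCH_HOLE.
 * For hole-punching, publish a shmem_falloc in inode->i_private so that
 * concurrent faults into the hole wait until the punch completes (see
 * shmem_fault); for preallocation, bring in pages one by one with
 * SGP_FALLOC, and on failure remove only the pages this call added.
 */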
2801 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2802 							 loff_t len)
2803 {
2804 	struct inode *inode = file_inode(file);
2805 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2806 	struct shmem_inode_info *info = SHMEM_I(inode);
2807 	struct shmem_falloc shmem_falloc;
2808 	pgoff_t start, index, end;
2809 	int error;
2810 
2811 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2812 		return -EOPNOTSUPP;
2813 
2814 	inode_lock(inode);
2815 
2816 	if (mode & FALLOC_FL_PUNCH_HOLE) {
2817 		struct address_space *mapping = file->f_mapping;
2818 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
2819 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2820 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2821 
2822 		/* protected by i_mutex */
2823 		if (info->seals & F_SEAL_WRITE) {
2824 			error = -EPERM;
2825 			goto out;
2826 		}
2827 
2828 		shmem_falloc.waitq = &shmem_falloc_waitq;
2829 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2830 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2831 		spin_lock(&inode->i_lock);
2832 		inode->i_private = &shmem_falloc;
2833 		spin_unlock(&inode->i_lock);
2834 
2835 		if ((u64)unmap_end > (u64)unmap_start)
2836 			unmap_mapping_range(mapping, unmap_start,
2837 					    1 + unmap_end - unmap_start, 0);
2838 		shmem_truncate_range(inode, offset, offset + len - 1);
2839 		/* No need to unmap again: hole-punching leaves COWed pages */
2840 
2841 		spin_lock(&inode->i_lock);
2842 		inode->i_private = NULL;
2843 		wake_up_all(&shmem_falloc_waitq);
2844 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2845 		spin_unlock(&inode->i_lock);
2846 		error = 0;
2847 		goto out;
2848 	}
2849 
2850 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2851 	error = inode_newsize_ok(inode, offset + len);
2852 	if (error)
2853 		goto out;
2854 
2855 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2856 		error = -EPERM;
2857 		goto out;
2858 	}
2859 
2860 	start = offset >> PAGE_SHIFT;
2861 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2862 	/* Try to avoid a swapstorm if len is impossible to satisfy */
2863 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2864 		error = -ENOSPC;
2865 		goto out;
2866 	}
2867 
2868 	shmem_falloc.waitq = NULL;
2869 	shmem_falloc.start = start;
2870 	shmem_falloc.next  = start;
2871 	shmem_falloc.nr_falloced = 0;
2872 	shmem_falloc.nr_unswapped = 0;
2873 	spin_lock(&inode->i_lock);
2874 	inode->i_private = &shmem_falloc;
2875 	spin_unlock(&inode->i_lock);
2876 
2877 	for (index = start; index < end; index++) {
2878 		struct page *page;
2879 
2880 		/*
2881 		 * Good, the fallocate(2) manpage permits EINTR: we may have
2882 		 * been interrupted because we are using up too much memory.
2883 		 */
2884 		if (signal_pending(current))
2885 			error = -EINTR;
2886 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2887 			error = -ENOMEM;
2888 		else
2889 			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2890 		if (error) {
2891 			/* Remove the !PageUptodate pages we added */
2892 			if (index > start) {
2893 				shmem_undo_range(inode,
2894 				    (loff_t)start << PAGE_SHIFT,
2895 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
2896 			}
2897 			goto undone;
2898 		}
2899 
2900 		/*
2901 		 * Inform shmem_writepage() how far we have reached.
2902 		 * No need for lock or barrier: we have the page lock.
2903 		 */
2904 		shmem_falloc.next++;
2905 		if (!PageUptodate(page))
2906 			shmem_falloc.nr_falloced++;
2907 
2908 		/*
2909 		 * If !PageUptodate, leave it that way so that freeable pages
2910 		 * can be recognized if we need to rollback on error later.
2911 		 * But set_page_dirty so that memory pressure will swap rather
2912 		 * than free the pages we are allocating (and SGP_CACHE pages
2913 		 * might still be clean: we now need to mark those dirty too).
2914 		 */
2915 		set_page_dirty(page);
2916 		unlock_page(page);
2917 		put_page(page);
2918 		cond_resched();
2919 	}
2920 
2921 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2922 		i_size_write(inode, offset + len);
2923 	inode->i_ctime = current_time(inode);
2924 undone:
2925 	spin_lock(&inode->i_lock);
2926 	inode->i_private = NULL;
2927 	spin_unlock(&inode->i_lock);
2928 out:
2929 	inode_unlock(inode);
2930 	return error;
2931 }
2932 
2933 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2934 {
2935 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2936 
2937 	buf->f_type = TMPFS_MAGIC;
2938 	buf->f_bsize = PAGE_SIZE;
2939 	buf->f_namelen = NAME_MAX;
2940 	if (sbinfo->max_blocks) {
2941 		buf->f_blocks = sbinfo->max_blocks;
2942 		buf->f_bavail =
2943 		buf->f_bfree  = sbinfo->max_blocks -
2944 				percpu_counter_sum(&sbinfo->used_blocks);
2945 	}
2946 	if (sbinfo->max_inodes) {
2947 		buf->f_files = sbinfo->max_inodes;
2948 		buf->f_ffree = sbinfo->free_inodes;
2949 	}
2950 	/* else leave those fields 0 like simple_statfs */
2951 	return 0;
2952 }
2953 
2954 /*
2955  * File creation. Allocate an inode, and we're done..
2956  */
2957 static int
2958 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2959 {
2960 	struct inode *inode;
2961 	int error = -ENOSPC;
2962 
2963 	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2964 	if (inode) {
2965 		error = simple_acl_create(dir, inode);
2966 		if (error)
2967 			goto out_iput;
2968 		error = security_inode_init_security(inode, dir,
2969 						     &dentry->d_name,
2970 						     shmem_initxattrs, NULL);
2971 		if (error && error != -EOPNOTSUPP)
2972 			goto out_iput;
2973 
2974 		error = 0;
2975 		dir->i_size += BOGO_DIRENT_SIZE;
2976 		dir->i_ctime = dir->i_mtime = current_time(dir);
2977 		d_instantiate(dentry, inode);
2978 		dget(dentry); /* Extra count - pin the dentry in core */
2979 	}
2980 	return error;
2981 out_iput:
2982 	iput(inode);
2983 	return error;
2984 }
2985 
2986 static int
2987 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2988 {
2989 	struct inode *inode;
2990 	int error = -ENOSPC;
2991 
2992 	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2993 	if (inode) {
2994 		error = security_inode_init_security(inode, dir,
2995 						     NULL,
2996 						     shmem_initxattrs, NULL);
2997 		if (error && error != -EOPNOTSUPP)
2998 			goto out_iput;
2999 		error = simple_acl_create(dir, inode);
3000 		if (error)
3001 			goto out_iput;
3002 		d_tmpfile(dentry, inode);
3003 	}
3004 	return error;
3005 out_iput:
3006 	iput(inode);
3007 	return error;
3008 }
3009 
3010 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3011 {
3012 	int error;
3013 
3014 	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
3015 		return error;
3016 	inc_nlink(dir);
3017 	return 0;
3018 }
3019 
3020 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
3021 		bool excl)
3022 {
3023 	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
3024 }
3025 
3026 /*
3027  * Link a file..
3028  */
3029 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
3030 {
3031 	struct inode *inode = d_inode(old_dentry);
3032 	int ret;
3033 
3034 	/*
3035 	 * No ordinary (disk based) filesystem counts links as inodes;
3036 	 * but each new link needs a new dentry, pinning lowmem, and
3037 	 * tmpfs dentries cannot be pruned until they are unlinked.
3038 	 */
3039 	ret = shmem_reserve_inode(inode->i_sb);
3040 	if (ret)
3041 		goto out;
3042 
3043 	dir->i_size += BOGO_DIRENT_SIZE;
3044 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3045 	inc_nlink(inode);
3046 	ihold(inode);	/* New dentry reference */
3047 	dget(dentry);		/* Extra pinning count for the created dentry */
3048 	d_instantiate(dentry, inode);
3049 out:
3050 	return ret;
3051 }
3052 
3053 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3054 {
3055 	struct inode *inode = d_inode(dentry);
3056 
3057 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3058 		shmem_free_inode(inode->i_sb);
3059 
3060 	dir->i_size -= BOGO_DIRENT_SIZE;
3061 	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3062 	drop_nlink(inode);
3063 	dput(dentry);	/* Undo the count from "create" - this does all the work */
3064 	return 0;
3065 }
3066 
3067 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3068 {
3069 	if (!simple_empty(dentry))
3070 		return -ENOTEMPTY;
3071 
3072 	drop_nlink(d_inode(dentry));
3073 	drop_nlink(dir);
3074 	return shmem_unlink(dir, dentry);
3075 }
3076 
3077 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
3078 {
3079 	bool old_is_dir = d_is_dir(old_dentry);
3080 	bool new_is_dir = d_is_dir(new_dentry);
3081 
3082 	if (old_dir != new_dir && old_is_dir != new_is_dir) {
3083 		if (old_is_dir) {
3084 			drop_nlink(old_dir);
3085 			inc_nlink(new_dir);
3086 		} else {
3087 			drop_nlink(new_dir);
3088 			inc_nlink(old_dir);
3089 		}
3090 	}
3091 	old_dir->i_ctime = old_dir->i_mtime =
3092 	new_dir->i_ctime = new_dir->i_mtime =
3093 	d_inode(old_dentry)->i_ctime =
3094 	d_inode(new_dentry)->i_ctime = current_time(old_dir);
3095 
3096 	return 0;
3097 }
3098 
3099 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3100 {
3101 	struct dentry *whiteout;
3102 	int error;
3103 
3104 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3105 	if (!whiteout)
3106 		return -ENOMEM;
3107 
3108 	error = shmem_mknod(old_dir, whiteout,
3109 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3110 	dput(whiteout);
3111 	if (error)
3112 		return error;
3113 
3114 	/*
3115 	 * Cheat and hash the whiteout while the old dentry is still in
3116 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3117 	 *
3118 	 * d_lookup() will consistently find one of them at this point,
3119 	 * not sure which one, but that isn't even important.
3120 	 */
3121 	d_rehash(whiteout);
3122 	return 0;
3123 }
3124 
3125 /*
3126  * The VFS layer already does all the dentry stuff for rename,
3127  * we just have to decrement the usage count for the target if
3128  * it exists so that the VFS layer correctly frees it when it
3129  * gets overwritten.
3130  */
3131 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3132 {
3133 	struct inode *inode = d_inode(old_dentry);
3134 	int they_are_dirs = S_ISDIR(inode->i_mode);
3135 
3136 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3137 		return -EINVAL;
3138 
3139 	if (flags & RENAME_EXCHANGE)
3140 		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3141 
3142 	if (!simple_empty(new_dentry))
3143 		return -ENOTEMPTY;
3144 
3145 	if (flags & RENAME_WHITEOUT) {
3146 		int error;
3147 
3148 		error = shmem_whiteout(old_dir, old_dentry);
3149 		if (error)
3150 			return error;
3151 	}
3152 
3153 	if (d_really_is_positive(new_dentry)) {
3154 		(void) shmem_unlink(new_dir, new_dentry);
3155 		if (they_are_dirs) {
3156 			drop_nlink(d_inode(new_dentry));
3157 			drop_nlink(old_dir);
3158 		}
3159 	} else if (they_are_dirs) {
3160 		drop_nlink(old_dir);
3161 		inc_nlink(new_dir);
3162 	}
3163 
3164 	old_dir->i_size -= BOGO_DIRENT_SIZE;
3165 	new_dir->i_size += BOGO_DIRENT_SIZE;
3166 	old_dir->i_ctime = old_dir->i_mtime =
3167 	new_dir->i_ctime = new_dir->i_mtime =
3168 	inode->i_ctime = current_time(old_dir);
3169 	return 0;
3170 }
3171 
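/*
 * Create a symlink: short targets (up to SHORT_SYMLINK_LEN) are kept in a
 * kmalloc'ed i_link; longer ones live in the first page of the inode's
 * mapping, so they remain swappable like ordinary tmpfs data.
 */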
3172 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3173 {
3174 	int error;
3175 	int len;
3176 	struct inode *inode;
3177 	struct page *page;
3178 	struct shmem_inode_info *info;
3179 
3180 	len = strlen(symname) + 1;
3181 	if (len > PAGE_SIZE)
3182 		return -ENAMETOOLONG;
3183 
3184 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
3185 	if (!inode)
3186 		return -ENOSPC;
3187 
3188 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3189 					     shmem_initxattrs, NULL);
3190 	if (error) {
3191 		if (error != -EOPNOTSUPP) {
3192 			iput(inode);
3193 			return error;
3194 		}
3195 		error = 0;
3196 	}
3197 
3198 	info = SHMEM_I(inode);
3199 	inode->i_size = len-1;
3200 	if (len <= SHORT_SYMLINK_LEN) {
3201 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3202 		if (!inode->i_link) {
3203 			iput(inode);
3204 			return -ENOMEM;
3205 		}
3206 		inode->i_op = &shmem_short_symlink_operations;
3207 	} else {
3208 		inode_nohighmem(inode);
3209 		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3210 		if (error) {
3211 			iput(inode);
3212 			return error;
3213 		}
3214 		inode->i_mapping->a_ops = &shmem_aops;
3215 		inode->i_op = &shmem_symlink_inode_operations;
3216 		memcpy(page_address(page), symname, len);
3217 		SetPageUptodate(page);
3218 		set_page_dirty(page);
3219 		unlock_page(page);
3220 		put_page(page);
3221 	}
3222 	dir->i_size += BOGO_DIRENT_SIZE;
3223 	dir->i_ctime = dir->i_mtime = current_time(dir);
3224 	d_instantiate(dentry, inode);
3225 	dget(dentry);
3226 	return 0;
3227 }
3228 
3229 static void shmem_put_link(void *arg)
3230 {
3231 	mark_page_accessed(arg);
3232 	put_page(arg);
3233 }
3234 
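/*
 * ->get_link: in RCU walk (dentry == NULL) succeed only if the symlink page
 * is already present and uptodate; otherwise fall back to shmem_getpage(),
 * releasing the page later via the delayed call to shmem_put_link().
 */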
3235 static const char *shmem_get_link(struct dentry *dentry,
3236 				  struct inode *inode,
3237 				  struct delayed_call *done)
3238 {
3239 	struct page *page = NULL;
3240 	int error;
3241 	if (!dentry) {
3242 		page = find_get_page(inode->i_mapping, 0);
3243 		if (!page)
3244 			return ERR_PTR(-ECHILD);
3245 		if (!PageUptodate(page)) {
3246 			put_page(page);
3247 			return ERR_PTR(-ECHILD);
3248 		}
3249 	} else {
3250 		error = shmem_getpage(inode, 0, &page, SGP_READ);
3251 		if (error)
3252 			return ERR_PTR(error);
3253 		unlock_page(page);
3254 	}
3255 	set_delayed_call(done, shmem_put_link, page);
3256 	return page_address(page);
3257 }
3258 
3259 #ifdef CONFIG_TMPFS_XATTR
3260 /*
3261  * Superblocks without xattr inode operations may get some security.* xattr
3262  * support from the LSM "for free". As soon as we have any other xattrs
3263  * like ACLs, we also need to implement the security.* handlers at
3264  * filesystem level, though.
3265  */
3266 
3267 /*
3268  * Callback for security_inode_init_security() for acquiring xattrs.
3269  */
3270 static int shmem_initxattrs(struct inode *inode,
3271 			    const struct xattr *xattr_array,
3272 			    void *fs_info)
3273 {
3274 	struct shmem_inode_info *info = SHMEM_I(inode);
3275 	const struct xattr *xattr;
3276 	struct simple_xattr *new_xattr;
3277 	size_t len;
3278 
3279 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3280 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3281 		if (!new_xattr)
3282 			return -ENOMEM;
3283 
3284 		len = strlen(xattr->name) + 1;
3285 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3286 					  GFP_KERNEL);
3287 		if (!new_xattr->name) {
3288 			kfree(new_xattr);
3289 			return -ENOMEM;
3290 		}
3291 
3292 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3293 		       XATTR_SECURITY_PREFIX_LEN);
3294 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3295 		       xattr->name, len);
3296 
3297 		simple_xattr_list_add(&info->xattrs, new_xattr);
3298 	}
3299 
3300 	return 0;
3301 }
3302 
3303 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3304 				   struct dentry *unused, struct inode *inode,
3305 				   const char *name, void *buffer, size_t size)
3306 {
3307 	struct shmem_inode_info *info = SHMEM_I(inode);
3308 
3309 	name = xattr_full_name(handler, name);
3310 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3311 }
3312 
3313 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3314 				   struct dentry *unused, struct inode *inode,
3315 				   const char *name, const void *value,
3316 				   size_t size, int flags)
3317 {
3318 	struct shmem_inode_info *info = SHMEM_I(inode);
3319 
3320 	name = xattr_full_name(handler, name);
3321 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
3322 }
3323 
3324 static const struct xattr_handler shmem_security_xattr_handler = {
3325 	.prefix = XATTR_SECURITY_PREFIX,
3326 	.get = shmem_xattr_handler_get,
3327 	.set = shmem_xattr_handler_set,
3328 };
3329 
3330 static const struct xattr_handler shmem_trusted_xattr_handler = {
3331 	.prefix = XATTR_TRUSTED_PREFIX,
3332 	.get = shmem_xattr_handler_get,
3333 	.set = shmem_xattr_handler_set,
3334 };
3335 
3336 static const struct xattr_handler *shmem_xattr_handlers[] = {
3337 #ifdef CONFIG_TMPFS_POSIX_ACL
3338 	&posix_acl_access_xattr_handler,
3339 	&posix_acl_default_xattr_handler,
3340 #endif
3341 	&shmem_security_xattr_handler,
3342 	&shmem_trusted_xattr_handler,
3343 	NULL
3344 };
3345 
3346 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3347 {
3348 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3349 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3350 }
3351 #endif /* CONFIG_TMPFS_XATTR */
3352 
3353 static const struct inode_operations shmem_short_symlink_operations = {
3354 	.get_link	= simple_get_link,
3355 #ifdef CONFIG_TMPFS_XATTR
3356 	.listxattr	= shmem_listxattr,
3357 #endif
3358 };
3359 
3360 static const struct inode_operations shmem_symlink_inode_operations = {
3361 	.get_link	= shmem_get_link,
3362 #ifdef CONFIG_TMPFS_XATTR
3363 	.listxattr	= shmem_listxattr,
3364 #endif
3365 };
3366 
3367 static struct dentry *shmem_get_parent(struct dentry *child)
3368 {
3369 	return ERR_PTR(-ESTALE);
3370 }
3371 
3372 static int shmem_match(struct inode *ino, void *vfh)
3373 {
3374 	__u32 *fh = vfh;
3375 	__u64 inum = fh[2];
3376 	inum = (inum << 32) | fh[1];
3377 	return ino->i_ino == inum && fh[0] == ino->i_generation;
3378 }
3379 
3380 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3381 		struct fid *fid, int fh_len, int fh_type)
3382 {
3383 	struct inode *inode;
3384 	struct dentry *dentry = NULL;
3385 	u64 inum;
3386 
3387 	if (fh_len < 3)
3388 		return NULL;
3389 
3390 	inum = fid->raw[2];
3391 	inum = (inum << 32) | fid->raw[1];
3392 
3393 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3394 			shmem_match, fid->raw);
3395 	if (inode) {
3396 		dentry = d_find_alias(inode);
3397 		iput(inode);
3398 	}
3399 
3400 	return dentry;
3401 }
3402 
3403 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3404 				struct inode *parent)
3405 {
3406 	if (*len < 3) {
3407 		*len = 3;
3408 		return FILEID_INVALID;
3409 	}
3410 
3411 	if (inode_unhashed(inode)) {
3412 		/* Unfortunately insert_inode_hash is not idempotent,
3413 		 * so as we hash inodes here rather than at creation
3414 		 * time, we need a lock to ensure we only try
3415 		 * to do it once
3416 		 */
3417 		static DEFINE_SPINLOCK(lock);
3418 		spin_lock(&lock);
3419 		if (inode_unhashed(inode))
3420 			__insert_inode_hash(inode,
3421 					    inode->i_ino + inode->i_generation);
3422 		spin_unlock(&lock);
3423 	}
3424 
3425 	fh[0] = inode->i_generation;
3426 	fh[1] = inode->i_ino;
3427 	fh[2] = ((__u64)inode->i_ino) >> 32;
3428 
3429 	*len = 3;
3430 	return 1;
3431 }
3432 
3433 static const struct export_operations shmem_export_ops = {
3434 	.get_parent     = shmem_get_parent,
3435 	.encode_fh      = shmem_encode_fh,
3436 	.fh_to_dentry	= shmem_fh_to_dentry,
3437 };
3438 
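/*
 * Parse tmpfs mount options (size, nr_blocks, nr_inodes, mode, uid, gid,
 * huge, mpol).  "size" may be given as a percentage of total RAM.  On
 * remount, mode/uid/gid are accepted but ignored.  Returns 0 on success,
 * nonzero on error.
 */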
3439 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
3440 			       bool remount)
3441 {
3442 	char *this_char, *value, *rest;
3443 	struct mempolicy *mpol = NULL;
3444 	uid_t uid;
3445 	gid_t gid;
3446 
3447 	while (options != NULL) {
3448 		this_char = options;
3449 		for (;;) {
3450 			/*
3451 			 * NUL-terminate this option: unfortunately,
3452 			 * mount options form a comma-separated list,
3453 			 * but mpol's nodelist may also contain commas.
3454 			 */
3455 			options = strchr(options, ',');
3456 			if (options == NULL)
3457 				break;
3458 			options++;
3459 			if (!isdigit(*options)) {
3460 				options[-1] = '\0';
3461 				break;
3462 			}
3463 		}
3464 		if (!*this_char)
3465 			continue;
3466 		if ((value = strchr(this_char,'=')) != NULL) {
3467 			*value++ = 0;
3468 		} else {
3469 			pr_err("tmpfs: No value for mount option '%s'\n",
3470 			       this_char);
3471 			goto error;
3472 		}
3473 
3474 		if (!strcmp(this_char,"size")) {
3475 			unsigned long long size;
3476 			size = memparse(value,&rest);
3477 			if (*rest == '%') {
3478 				size <<= PAGE_SHIFT;
3479 				size *= totalram_pages;
3480 				do_div(size, 100);
3481 				rest++;
3482 			}
3483 			if (*rest)
3484 				goto bad_val;
3485 			sbinfo->max_blocks =
3486 				DIV_ROUND_UP(size, PAGE_SIZE);
3487 		} else if (!strcmp(this_char,"nr_blocks")) {
3488 			sbinfo->max_blocks = memparse(value, &rest);
3489 			if (*rest)
3490 				goto bad_val;
3491 		} else if (!strcmp(this_char,"nr_inodes")) {
3492 			sbinfo->max_inodes = memparse(value, &rest);
3493 			if (*rest)
3494 				goto bad_val;
3495 		} else if (!strcmp(this_char,"mode")) {
3496 			if (remount)
3497 				continue;
3498 			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
3499 			if (*rest)
3500 				goto bad_val;
3501 		} else if (!strcmp(this_char,"uid")) {
3502 			if (remount)
3503 				continue;
3504 			uid = simple_strtoul(value, &rest, 0);
3505 			if (*rest)
3506 				goto bad_val;
3507 			sbinfo->uid = make_kuid(current_user_ns(), uid);
3508 			if (!uid_valid(sbinfo->uid))
3509 				goto bad_val;
3510 		} else if (!strcmp(this_char,"gid")) {
3511 			if (remount)
3512 				continue;
3513 			gid = simple_strtoul(value, &rest, 0);
3514 			if (*rest)
3515 				goto bad_val;
3516 			sbinfo->gid = make_kgid(current_user_ns(), gid);
3517 			if (!gid_valid(sbinfo->gid))
3518 				goto bad_val;
3519 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3520 		} else if (!strcmp(this_char, "huge")) {
3521 			int huge;
3522 			huge = shmem_parse_huge(value);
3523 			if (huge < 0)
3524 				goto bad_val;
3525 			if (!has_transparent_hugepage() &&
3526 					huge != SHMEM_HUGE_NEVER)
3527 				goto bad_val;
3528 			sbinfo->huge = huge;
3529 #endif
3530 #ifdef CONFIG_NUMA
3531 		} else if (!strcmp(this_char,"mpol")) {
3532 			mpol_put(mpol);
3533 			mpol = NULL;
3534 			if (mpol_parse_str(value, &mpol))
3535 				goto bad_val;
3536 #endif
3537 		} else {
3538 			pr_err("tmpfs: Bad mount option %s\n", this_char);
3539 			goto error;
3540 		}
3541 	}
3542 	sbinfo->mpol = mpol;
3543 	return 0;
3544 
3545 bad_val:
3546 	pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
3547 	       value, this_char);
3548 error:
3549 	mpol_put(mpol);
3550 	return 1;
3551 
3552 }
3553 
3554 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
3555 {
3556 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3557 	struct shmem_sb_info config = *sbinfo;
3558 	unsigned long inodes;
3559 	int error = -EINVAL;
3560 
3561 	config.mpol = NULL;
3562 	if (shmem_parse_options(data, &config, true))
3563 		return error;
3564 
3565 	spin_lock(&sbinfo->stat_lock);
3566 	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3567 	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
3568 		goto out;
3569 	if (config.max_inodes < inodes)
3570 		goto out;
3571 	/*
3572 	 * Those tests disallow limited->unlimited while any are in use;
3573 	 * but we must separately disallow unlimited->limited, because
3574 	 * in that case we have no record of how much is already in use.
3575 	 */
3576 	if (config.max_blocks && !sbinfo->max_blocks)
3577 		goto out;
3578 	if (config.max_inodes && !sbinfo->max_inodes)
3579 		goto out;
3580 
3581 	error = 0;
3582 	sbinfo->huge = config.huge;
3583 	sbinfo->max_blocks  = config.max_blocks;
3584 	sbinfo->max_inodes  = config.max_inodes;
3585 	sbinfo->free_inodes = config.max_inodes - inodes;
3586 
3587 	/*
3588 	 * Preserve previous mempolicy unless mpol remount option was specified.
3589 	 */
3590 	if (config.mpol) {
3591 		mpol_put(sbinfo->mpol);
3592 		sbinfo->mpol = config.mpol;	/* transfers initial ref */
3593 	}
3594 out:
3595 	spin_unlock(&sbinfo->stat_lock);
3596 	return error;
3597 }
3598 
3599 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3600 {
3601 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3602 
3603 	if (sbinfo->max_blocks != shmem_default_max_blocks())
3604 		seq_printf(seq, ",size=%luk",
3605 			sbinfo->max_blocks << (PAGE_SHIFT - 10));
3606 	if (sbinfo->max_inodes != shmem_default_max_inodes())
3607 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3608 	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
3609 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3610 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3611 		seq_printf(seq, ",uid=%u",
3612 				from_kuid_munged(&init_user_ns, sbinfo->uid));
3613 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3614 		seq_printf(seq, ",gid=%u",
3615 				from_kgid_munged(&init_user_ns, sbinfo->gid));
3616 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3617 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3618 	if (sbinfo->huge)
3619 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3620 #endif
3621 	shmem_show_mpol(seq, sbinfo->mpol);
3622 	return 0;
3623 }
3624 
3625 #define MFD_NAME_PREFIX "memfd:"
3626 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
3627 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
3628 
3629 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
3630 
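/*
 * memfd_create(2): create an anonymous tmpfs file named "memfd:<name>" and
 * return a file descriptor for it.  Seals may only be added later if
 * MFD_ALLOW_SEALING was passed; otherwise the inode keeps F_SEAL_SEAL set.
 */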
3631 SYSCALL_DEFINE2(memfd_create,
3632 		const char __user *, uname,
3633 		unsigned int, flags)
3634 {
3635 	struct shmem_inode_info *info;
3636 	struct file *file;
3637 	int fd, error;
3638 	char *name;
3639 	long len;
3640 
3641 	if (flags & ~(unsigned int)MFD_ALL_FLAGS)
3642 		return -EINVAL;
3643 
3644 	/* length includes terminating zero */
3645 	len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
3646 	if (len <= 0)
3647 		return -EFAULT;
3648 	if (len > MFD_NAME_MAX_LEN + 1)
3649 		return -EINVAL;
3650 
3651 	name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
3652 	if (!name)
3653 		return -ENOMEM;
3654 
3655 	strcpy(name, MFD_NAME_PREFIX);
3656 	if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
3657 		error = -EFAULT;
3658 		goto err_name;
3659 	}
3660 
3661 	/* terminating-zero may have changed after strnlen_user() returned */
3662 	if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
3663 		error = -EFAULT;
3664 		goto err_name;
3665 	}
3666 
3667 	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
3668 	if (fd < 0) {
3669 		error = fd;
3670 		goto err_name;
3671 	}
3672 
3673 	file = shmem_file_setup(name, 0, VM_NORESERVE);
3674 	if (IS_ERR(file)) {
3675 		error = PTR_ERR(file);
3676 		goto err_fd;
3677 	}
3678 	info = SHMEM_I(file_inode(file));
3679 	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
3680 	file->f_flags |= O_RDWR | O_LARGEFILE;
3681 	if (flags & MFD_ALLOW_SEALING)
3682 		info->seals &= ~F_SEAL_SEAL;
3683 
3684 	fd_install(fd, file);
3685 	kfree(name);
3686 	return fd;
3687 
3688 err_fd:
3689 	put_unused_fd(fd);
3690 err_name:
3691 	kfree(name);
3692 	return error;
3693 }
3694 
3695 #endif /* CONFIG_TMPFS */
3696 
3697 static void shmem_put_super(struct super_block *sb)
3698 {
3699 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3700 
3701 	percpu_counter_destroy(&sbinfo->used_blocks);
3702 	mpol_put(sbinfo->mpol);
3703 	kfree(sbinfo);
3704 	sb->s_fs_info = NULL;
3705 }
3706 
3707 int shmem_fill_super(struct super_block *sb, void *data, int silent)
3708 {
3709 	struct inode *inode;
3710 	struct shmem_sb_info *sbinfo;
3711 	int err = -ENOMEM;
3712 
3713 	/* Round up to L1_CACHE_BYTES to resist false sharing */
3714 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3715 				L1_CACHE_BYTES), GFP_KERNEL);
3716 	if (!sbinfo)
3717 		return -ENOMEM;
3718 
3719 	sbinfo->mode = S_IRWXUGO | S_ISVTX;
3720 	sbinfo->uid = current_fsuid();
3721 	sbinfo->gid = current_fsgid();
3722 	sb->s_fs_info = sbinfo;
3723 
3724 #ifdef CONFIG_TMPFS
3725 	/*
3726 	 * By default we only allow half of the physical RAM per
3727 	 * tmpfs instance, limiting inodes to one per page of lowmem;
3728 	 * but the internal instance is left unlimited.
3729 	 */
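	/*
	 * Illustrative numbers (not taken from this file): on a machine
	 * with roughly 8GiB of RAM, a plain "mount -t tmpfs tmpfs /mnt"
	 * defaults to about size=4G, i.e. half of RAM.
	 */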
3730 	if (!(sb->s_flags & MS_KERNMOUNT)) {
3731 		sbinfo->max_blocks = shmem_default_max_blocks();
3732 		sbinfo->max_inodes = shmem_default_max_inodes();
3733 		if (shmem_parse_options(data, sbinfo, false)) {
3734 			err = -EINVAL;
3735 			goto failed;
3736 		}
3737 	} else {
3738 		sb->s_flags |= MS_NOUSER;
3739 	}
3740 	sb->s_export_op = &shmem_export_ops;
3741 	sb->s_flags |= MS_NOSEC;
3742 #else
3743 	sb->s_flags |= MS_NOUSER;
3744 #endif
3745 
3746 	spin_lock_init(&sbinfo->stat_lock);
3747 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3748 		goto failed;
3749 	sbinfo->free_inodes = sbinfo->max_inodes;
3750 	spin_lock_init(&sbinfo->shrinklist_lock);
3751 	INIT_LIST_HEAD(&sbinfo->shrinklist);
3752 
3753 	sb->s_maxbytes = MAX_LFS_FILESIZE;
3754 	sb->s_blocksize = PAGE_SIZE;
3755 	sb->s_blocksize_bits = PAGE_SHIFT;
3756 	sb->s_magic = TMPFS_MAGIC;
3757 	sb->s_op = &shmem_ops;
3758 	sb->s_time_gran = 1;
3759 #ifdef CONFIG_TMPFS_XATTR
3760 	sb->s_xattr = shmem_xattr_handlers;
3761 #endif
3762 #ifdef CONFIG_TMPFS_POSIX_ACL
3763 	sb->s_flags |= MS_POSIXACL;
3764 #endif
3765 	uuid_gen(&sb->s_uuid);
3766 
3767 	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3768 	if (!inode)
3769 		goto failed;
3770 	inode->i_uid = sbinfo->uid;
3771 	inode->i_gid = sbinfo->gid;
3772 	sb->s_root = d_make_root(inode);
3773 	if (!sb->s_root)
3774 		goto failed;
3775 	return 0;
3776 
3777 failed:
3778 	shmem_put_super(sb);
3779 	return err;
3780 }
3781 
3782 static struct kmem_cache *shmem_inode_cachep;
3783 
3784 static struct inode *shmem_alloc_inode(struct super_block *sb)
3785 {
3786 	struct shmem_inode_info *info;
3787 	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3788 	if (!info)
3789 		return NULL;
3790 	return &info->vfs_inode;
3791 }
3792 
3793 static void shmem_destroy_callback(struct rcu_head *head)
3794 {
3795 	struct inode *inode = container_of(head, struct inode, i_rcu);
3796 	if (S_ISLNK(inode->i_mode))
3797 		kfree(inode->i_link);
3798 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3799 }
3800 
3801 static void shmem_destroy_inode(struct inode *inode)
3802 {
3803 	if (S_ISREG(inode->i_mode))
3804 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3805 	call_rcu(&inode->i_rcu, shmem_destroy_callback);
3806 }
3807 
3808 static void shmem_init_inode(void *foo)
3809 {
3810 	struct shmem_inode_info *info = foo;
3811 	inode_init_once(&info->vfs_inode);
3812 }
3813 
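/*
 * SLAB_PANIC makes kmem_cache_create() panic on failure, which is why
 * shmem_init_inodecache() can unconditionally return 0.
 */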
3814 static int shmem_init_inodecache(void)
3815 {
3816 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3817 				sizeof(struct shmem_inode_info),
3818 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3819 	return 0;
3820 }
3821 
3822 static void shmem_destroy_inodecache(void)
3823 {
3824 	kmem_cache_destroy(shmem_inode_cachep);
3825 }
3826 
3827 static const struct address_space_operations shmem_aops = {
3828 	.writepage	= shmem_writepage,
3829 	.set_page_dirty	= __set_page_dirty_no_writeback,
3830 #ifdef CONFIG_TMPFS
3831 	.write_begin	= shmem_write_begin,
3832 	.write_end	= shmem_write_end,
3833 #endif
3834 #ifdef CONFIG_MIGRATION
3835 	.migratepage	= migrate_page,
3836 #endif
3837 	.error_remove_page = generic_error_remove_page,
3838 };
3839 
3840 static const struct file_operations shmem_file_operations = {
3841 	.mmap		= shmem_mmap,
3842 	.get_unmapped_area = shmem_get_unmapped_area,
3843 #ifdef CONFIG_TMPFS
3844 	.llseek		= shmem_file_llseek,
3845 	.read_iter	= shmem_file_read_iter,
3846 	.write_iter	= generic_file_write_iter,
3847 	.fsync		= noop_fsync,
3848 	.splice_read	= generic_file_splice_read,
3849 	.splice_write	= iter_file_splice_write,
3850 	.fallocate	= shmem_fallocate,
3851 #endif
3852 };
3853 
3854 static const struct inode_operations shmem_inode_operations = {
3855 	.getattr	= shmem_getattr,
3856 	.setattr	= shmem_setattr,
3857 #ifdef CONFIG_TMPFS_XATTR
3858 	.listxattr	= shmem_listxattr,
3859 	.set_acl	= simple_set_acl,
3860 #endif
3861 };
3862 
3863 static const struct inode_operations shmem_dir_inode_operations = {
3864 #ifdef CONFIG_TMPFS
3865 	.create		= shmem_create,
3866 	.lookup		= simple_lookup,
3867 	.link		= shmem_link,
3868 	.unlink		= shmem_unlink,
3869 	.symlink	= shmem_symlink,
3870 	.mkdir		= shmem_mkdir,
3871 	.rmdir		= shmem_rmdir,
3872 	.mknod		= shmem_mknod,
3873 	.rename		= shmem_rename2,
3874 	.tmpfile	= shmem_tmpfile,
3875 #endif
3876 #ifdef CONFIG_TMPFS_XATTR
3877 	.listxattr	= shmem_listxattr,
3878 #endif
3879 #ifdef CONFIG_TMPFS_POSIX_ACL
3880 	.setattr	= shmem_setattr,
3881 	.set_acl	= simple_set_acl,
3882 #endif
3883 };
3884 
3885 static const struct inode_operations shmem_special_inode_operations = {
3886 #ifdef CONFIG_TMPFS_XATTR
3887 	.listxattr	= shmem_listxattr,
3888 #endif
3889 #ifdef CONFIG_TMPFS_POSIX_ACL
3890 	.setattr	= shmem_setattr,
3891 	.set_acl	= simple_set_acl,
3892 #endif
3893 };
3894 
3895 static const struct super_operations shmem_ops = {
3896 	.alloc_inode	= shmem_alloc_inode,
3897 	.destroy_inode	= shmem_destroy_inode,
3898 #ifdef CONFIG_TMPFS
3899 	.statfs		= shmem_statfs,
3900 	.remount_fs	= shmem_remount_fs,
3901 	.show_options	= shmem_show_options,
3902 #endif
3903 	.evict_inode	= shmem_evict_inode,
3904 	.drop_inode	= generic_delete_inode,
3905 	.put_super	= shmem_put_super,
3906 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3907 	.nr_cached_objects	= shmem_unused_huge_count,
3908 	.free_cached_objects	= shmem_unused_huge_scan,
3909 #endif
3910 };
3911 
3912 static const struct vm_operations_struct shmem_vm_ops = {
3913 	.fault		= shmem_fault,
3914 	.map_pages	= filemap_map_pages,
3915 #ifdef CONFIG_NUMA
3916 	.set_policy     = shmem_set_policy,
3917 	.get_policy     = shmem_get_policy,
3918 #endif
3919 };
3920 
3921 static struct dentry *shmem_mount(struct file_system_type *fs_type,
3922 	int flags, const char *dev_name, void *data)
3923 {
3924 	return mount_nodev(fs_type, flags, data, shmem_fill_super);
3925 }
3926 
3927 static struct file_system_type shmem_fs_type = {
3928 	.owner		= THIS_MODULE,
3929 	.name		= "tmpfs",
3930 	.mount		= shmem_mount,
3931 	.kill_sb	= kill_litter_super,
3932 	.fs_flags	= FS_USERNS_MOUNT,
3933 };
3934 
3935 int __init shmem_init(void)
3936 {
3937 	int error;
3938 
3939 	/* If rootfs called this, don't re-init */
3940 	if (shmem_inode_cachep)
3941 		return 0;
3942 
3943 	error = shmem_init_inodecache();
3944 	if (error)
3945 		goto out3;
3946 
3947 	error = register_filesystem(&shmem_fs_type);
3948 	if (error) {
3949 		pr_err("Could not register tmpfs\n");
3950 		goto out2;
3951 	}
3952 
3953 	shm_mnt = kern_mount(&shmem_fs_type);
3954 	if (IS_ERR(shm_mnt)) {
3955 		error = PTR_ERR(shm_mnt);
3956 		pr_err("Could not kern_mount tmpfs\n");
3957 		goto out1;
3958 	}
3959 
3960 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3961 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3962 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3963 	else
3964 		shmem_huge = 0; /* just in case it was patched */
3965 #endif
3966 	return 0;
3967 
3968 out1:
3969 	unregister_filesystem(&shmem_fs_type);
3970 out2:
3971 	shmem_destroy_inodecache();
3972 out3:
3973 	shm_mnt = ERR_PTR(error);
3974 	return error;
3975 }
3976 
3977 #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
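/*
 * This attribute is registered by the THP sysfs code and appears as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.  An illustrative
 * session (default configuration assumed):
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	always within_size advise [never] deny force
 *	# echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */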
3978 static ssize_t shmem_enabled_show(struct kobject *kobj,
3979 		struct kobj_attribute *attr, char *buf)
3980 {
3981 	int values[] = {
3982 		SHMEM_HUGE_ALWAYS,
3983 		SHMEM_HUGE_WITHIN_SIZE,
3984 		SHMEM_HUGE_ADVISE,
3985 		SHMEM_HUGE_NEVER,
3986 		SHMEM_HUGE_DENY,
3987 		SHMEM_HUGE_FORCE,
3988 	};
3989 	int i, count;
3990 
3991 	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
3992 		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
3993 
3994 		count += sprintf(buf + count, fmt,
3995 				shmem_format_huge(values[i]));
3996 	}
3997 	buf[count - 1] = '\n';
3998 	return count;
3999 }
4000 
4001 static ssize_t shmem_enabled_store(struct kobject *kobj,
4002 		struct kobj_attribute *attr, const char *buf, size_t count)
4003 {
4004 	char tmp[16];
4005 	int huge;
4006 
4007 	if (count + 1 > sizeof(tmp))
4008 		return -EINVAL;
4009 	memcpy(tmp, buf, count);
4010 	tmp[count] = '\0';
4011 	if (count && tmp[count - 1] == '\n')
4012 		tmp[count - 1] = '\0';
4013 
4014 	huge = shmem_parse_huge(tmp);
4015 	if (huge == -EINVAL)
4016 		return -EINVAL;
4017 	if (!has_transparent_hugepage() &&
4018 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4019 		return -EINVAL;
4020 
4021 	shmem_huge = huge;
4022 	if (shmem_huge > SHMEM_HUGE_DENY)
4023 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4024 	return count;
4025 }
4026 
4027 struct kobj_attribute shmem_enabled_attr =
4028 	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4029 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
4030 
4031 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
4032 bool shmem_huge_enabled(struct vm_area_struct *vma)
4033 {
4034 	struct inode *inode = file_inode(vma->vm_file);
4035 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4036 	loff_t i_size;
4037 	pgoff_t off;
4038 
4039 	if (shmem_huge == SHMEM_HUGE_FORCE)
4040 		return true;
4041 	if (shmem_huge == SHMEM_HUGE_DENY)
4042 		return false;
4043 	switch (sbinfo->huge) {
4044 		case SHMEM_HUGE_NEVER:
4045 			return false;
4046 		case SHMEM_HUGE_ALWAYS:
4047 			return true;
4048 		case SHMEM_HUGE_WITHIN_SIZE:
4049 			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4050 			i_size = round_up(i_size_read(inode), PAGE_SIZE);
4051 			if (i_size >= HPAGE_PMD_SIZE &&
4052 					i_size >> PAGE_SHIFT >= off)
4053 				return true;
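			/* fall through */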
4054 		case SHMEM_HUGE_ADVISE:
4055 			/* TODO: implement fadvise() hints */
4056 			return (vma->vm_flags & VM_HUGEPAGE);
4057 		default:
4058 			VM_BUG_ON(1);
4059 			return false;
4060 	}
4061 }
4062 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
4063 
4064 #else /* !CONFIG_SHMEM */
4065 
4066 /*
4067  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4068  *
4069  * This is intended for small systems where the benefits of the full
4070  * shmem code (swap-backed and resource-limited) are outweighed by
4071  * their complexity. On systems without swap this code should be
4072  * effectively equivalent, but much lighter weight.
4073  */
4074 
4075 static struct file_system_type shmem_fs_type = {
4076 	.name		= "tmpfs",
4077 	.mount		= ramfs_mount,
4078 	.kill_sb	= kill_litter_super,
4079 	.fs_flags	= FS_USERNS_MOUNT,
4080 };
4081 
4082 int __init shmem_init(void)
4083 {
4084 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4085 
4086 	shm_mnt = kern_mount(&shmem_fs_type);
4087 	BUG_ON(IS_ERR(shm_mnt));
4088 
4089 	return 0;
4090 }
4091 
4092 int shmem_unuse(swp_entry_t swap, struct page *page)
4093 {
4094 	return 0;
4095 }
4096 
4097 int shmem_lock(struct file *file, int lock, struct user_struct *user)
4098 {
4099 	return 0;
4100 }
4101 
4102 void shmem_unlock_mapping(struct address_space *mapping)
4103 {
4104 }
4105 
4106 #ifdef CONFIG_MMU
4107 unsigned long shmem_get_unmapped_area(struct file *file,
4108 				      unsigned long addr, unsigned long len,
4109 				      unsigned long pgoff, unsigned long flags)
4110 {
4111 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4112 }
4113 #endif
4114 
4115 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4116 {
4117 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4118 }
4119 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4120 
4121 #define shmem_vm_ops				generic_file_vm_ops
4122 #define shmem_file_operations			ramfs_file_operations
4123 #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
4124 #define shmem_acct_size(flags, size)		0
4125 #define shmem_unacct_size(flags, size)		do {} while (0)
4126 
4127 #endif /* CONFIG_SHMEM */
4128 
4129 /* common code */
4130 
4131 static const struct dentry_operations anon_ops = {
4132 	.d_dname = simple_dname
4133 };
4134 
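/*
 * __shmem_file_setup() creates an unlinked file on the internal shm_mnt
 * tmpfs instance: it accounts the size, allocates an anonymous dentry and
 * a shmem inode, and wraps them in a struct file.  The i_flags argument
 * lets callers mark the inode S_PRIVATE to skip LSM checks (see
 * shmem_kernel_file_setup() below).
 */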
4135 static struct file *__shmem_file_setup(const char *name, loff_t size,
4136 				       unsigned long flags, unsigned int i_flags)
4137 {
4138 	struct file *res;
4139 	struct inode *inode;
4140 	struct path path;
4141 	struct super_block *sb;
4142 	struct qstr this;
4143 
4144 	if (IS_ERR(shm_mnt))
4145 		return ERR_CAST(shm_mnt);
4146 
4147 	if (size < 0 || size > MAX_LFS_FILESIZE)
4148 		return ERR_PTR(-EINVAL);
4149 
4150 	if (shmem_acct_size(flags, size))
4151 		return ERR_PTR(-ENOMEM);
4152 
4153 	res = ERR_PTR(-ENOMEM);
4154 	this.name = name;
4155 	this.len = strlen(name);
4156 	this.hash = 0; /* will go */
4157 	sb = shm_mnt->mnt_sb;
4158 	path.mnt = mntget(shm_mnt);
4159 	path.dentry = d_alloc_pseudo(sb, &this);
4160 	if (!path.dentry)
4161 		goto put_memory;
4162 	d_set_d_op(path.dentry, &anon_ops);
4163 
4164 	res = ERR_PTR(-ENOSPC);
4165 	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
4166 	if (!inode)
4167 		goto put_memory;
4168 
4169 	inode->i_flags |= i_flags;
4170 	d_instantiate(path.dentry, inode);
4171 	inode->i_size = size;
4172 	clear_nlink(inode);	/* It is unlinked */
4173 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4174 	if (IS_ERR(res))
4175 		goto put_path;
4176 
4177 	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
4178 		  &shmem_file_operations);
4179 	if (IS_ERR(res))
4180 		goto put_path;
4181 
4182 	return res;
4183 
4184 put_memory:
4185 	shmem_unacct_size(flags, size);
4186 put_path:
4187 	path_put(&path);
4188 	return res;
4189 }
4190 
4191 /**
4192  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4193  * 	kernel internal.  There will be NO LSM permission checks against the
4194  * 	underlying inode.  So users of this interface must do LSM checks at a
4195  *	higher layer.  The users are the big_key and shm implementations.  LSM
4196  *	checks are provided at the key or shm level rather than the inode.
4197  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4198  * @size: size to be set for the file
4199  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4200  */
4201 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4202 {
4203 	return __shmem_file_setup(name, size, flags, S_PRIVATE);
4204 }
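/*
 * Usage sketch (illustrative arguments; the real callers are the big_key
 * and SysV shm code mentioned above):
 *
 *	file = shmem_kernel_file_setup("big_key", datalen, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */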
4205 
4206 /**
4207  * shmem_file_setup - get an unlinked file living in tmpfs
4208  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4209  * @size: size to be set for the file
4210  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4211  */
4212 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4213 {
4214 	return __shmem_file_setup(name, size, flags, 0);
4215 }
4216 EXPORT_SYMBOL_GPL(shmem_file_setup);
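/* For an in-tree caller see memfd_create() above (CONFIG_TMPFS), which passes VM_NORESERVE. */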
4217 
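/*
 * shmem_zero_setup() below is called from code outside this file for
 * mmap() of /dev/zero with MAP_SHARED and for MAP_SHARED | MAP_ANONYMOUS
 * mappings, both of which need a tmpfs object to back the shared pages.
 */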
4218 /**
4219  * shmem_zero_setup - setup a shared anonymous mapping
4220  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
4221  */
4222 int shmem_zero_setup(struct vm_area_struct *vma)
4223 {
4224 	struct file *file;
4225 	loff_t size = vma->vm_end - vma->vm_start;
4226 
4227 	/*
4228 	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
4229 	 * between XFS directory reading and selinux: since this file is only
4230 	 * accessible to the user through its mapping, use S_PRIVATE flag to
4231 	 * bypass file security, in the same way as shmem_kernel_file_setup().
4232 	 */
4233 	file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
4234 	if (IS_ERR(file))
4235 		return PTR_ERR(file);
4236 
4237 	if (vma->vm_file)
4238 		fput(vma->vm_file);
4239 	vma->vm_file = file;
4240 	vma->vm_ops = &shmem_vm_ops;
4241 
4242 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4243 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4244 			(vma->vm_end & HPAGE_PMD_MASK)) {
4245 		khugepaged_enter(vma, vma->vm_flags);
4246 	}
4247 
4248 	return 0;
4249 }
4250 
4251 /**
4252  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4253  * @mapping:	the page's address_space
4254  * @index:	the page index
4255  * @gfp:	the page allocator flags to use if allocating
4256  *
4257  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4258  * with any new page allocations done using the specified allocation flags.
4259  * But read_cache_page_gfp() uses the ->readpage() method, which does not
4260  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4261  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4262  *
4263  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4264  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4265  */
4266 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4267 					 pgoff_t index, gfp_t gfp)
4268 {
4269 #ifdef CONFIG_SHMEM
4270 	struct inode *inode = mapping->host;
4271 	struct page *page;
4272 	int error;
4273 
4274 	BUG_ON(mapping->a_ops != &shmem_aops);
4275 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4276 				  gfp, NULL, NULL, NULL);
4277 	if (error)
4278 		page = ERR_PTR(error);
4279 	else
4280 		unlock_page(page);
4281 	return page;
4282 #else
4283 	/*
4284 	 * The tiny !SHMEM case uses ramfs without swap
4285 	 */
4286 	return read_cache_page_gfp(mapping, index, gfp);
4287 #endif
4288 }
4289 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
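/*
 * Caller-side sketch (illustrative, modelled on the gfp mixing described
 * in the comment above):
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */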
4290