xref: /linux/mm/shmem.c (revision 4a5df37964673effcd9f84041f7423206a5ae5f2)
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt __ro_after_init;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/quotaops.h>
#include <linux/rcupdate_wait.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
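
/*
 * Worked example (illustrative, assuming 4KiB pages, PAGE_SHIFT == 12):
 * VM_ACCT(10000) == PAGE_ALIGN(10000) >> 12 == 12288 >> 12 == 3, so a
 * 10000-byte object is charged as three whole pages in the overcommit
 * accounting below.
 */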

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Pretend that one inode + its dentry occupy this much memory */
#define BOGO_INODE_SIZE 1024

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};
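
/*
 * Illustrative userspace sketch (not part of the kernel): one way to
 * exercise the fallocate path described above is a memfd preallocation
 * followed by a hole punch. The name and sizes are arbitrary examples,
 * assuming 4KiB pages.
 */
#if 0
#define _GNU_SOURCE
#include <sys/mman.h>		/* memfd_create() */
#include <fcntl.h>		/* fallocate(), FALLOC_FL_* */
#include <unistd.h>

static int shmem_falloc_demo(void)
{
	int fd = memfd_create("demo", 0);	/* memfds are shmem-backed */

	if (fd < 0)
		return -1;
	/* Preallocate 1MiB: drives shmem_fallocate() over 256 pages */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		goto err;
	/* Punch a 64KiB hole: faults into it wait on shmem_falloc.waitq */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 64 << 10) < 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
#endif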

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
	unsigned short quota_types;
	struct shmem_quota_limits qlimits;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
};

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static unsigned long huge_shmem_orders_always __read_mostly;
static unsigned long huge_shmem_orders_madvise __read_mostly;
static unsigned long huge_shmem_orders_inherit __read_mostly;
static unsigned long huge_shmem_orders_within_size __read_mostly;
#endif

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
			ULONG_MAX / BOGO_INODE_SIZE);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
			struct vm_area_struct *vma, vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
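
/*
 * Worked example (illustrative, 4KiB pages): growing a pre-accounted
 * object from 10000 to 20000 bytes charges VM_ACCT(20000) - VM_ACCT(10000)
 * == 5 - 3 == 2 more pages; shrinking it back returns those 2 pages.
 */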

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static int shmem_inode_acct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int err = -ENOSPC;

	if (shmem_acct_blocks(info->flags, pages))
		return err;

	might_sleep();	/* when quotas */
	if (sbinfo->max_blocks) {
		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
						sbinfo->max_blocks, pages))
			goto unacct;

		err = dquot_alloc_block_nodirty(inode, pages);
		if (err) {
			percpu_counter_sub(&sbinfo->used_blocks, pages);
			goto unacct;
		}
	} else {
		err = dquot_alloc_block_nodirty(inode, pages);
		if (err)
			goto unacct;
	}

	return 0;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return err;
}

static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	might_sleep();	/* when quotas */
	dquot_free_block_nodirty(inode, pages);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->a_ops == &shmem_aops;
}
EXPORT_SYMBOL_GPL(shmem_mapping);

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

#ifdef CONFIG_TMPFS_QUOTA

static int shmem_enable_quotas(struct super_block *sb,
			       unsigned short quota_types)
{
	int type, err = 0;

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
		if (!(quota_types & (1 << type)))
			continue;
		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
					  DQUOT_USAGE_ENABLED |
					  DQUOT_LIMITS_ENABLED);
		if (err)
			goto out_err;
	}
	return 0;

out_err:
	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
		type, err);
	for (type--; type >= 0; type--)
		dquot_quota_off(sb, type);
	return err;
}

static void shmem_disable_quotas(struct super_block *sb)
{
	int type;

	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
		dquot_quota_off(sb, type);
}

static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
{
	return SHMEM_I(inode)->i_dquot;
}
#endif /* CONFIG_TMPFS_QUOTA */

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_ispace -= BOGO_INODE_SIZE;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
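
/*
 * Worked example (illustrative): with SHMEM_INO_BATCH == 1024, a CPU whose
 * cached next ino hits a multiple of 1024 takes stat_lock once, claims the
 * next batch of 1024 inos for itself, and then hands them out locklessly
 * until it reaches the next batch boundary.
 */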

static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 * @alloced: the change in number of pages allocated to inode
 * @swapped: the change in number of pages swapped from inode
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 */
static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	spin_lock(&info->lock);
	info->alloced += alloced;
	info->swapped += swapped;
	freed = info->alloced - info->swapped -
		READ_ONCE(inode->i_mapping->nrpages);
	/*
	 * Special case: whereas normally shmem_recalc_inode() is called
	 * after i_mapping->nrpages has already been adjusted (up or down),
	 * shmem_writepage() has to raise swapped before nrpages is lowered -
	 * to stop a racing shmem_recalc_inode() from thinking that a page has
	 * been freed.  Compensate here, to avoid the need for a followup call.
	 */
	if (swapped > 0)
		freed += swapped;
	if (freed > 0)
		info->alloced -= freed;
	spin_unlock(&info->lock);

	/* The quota case may block */
	if (freed > 0)
		shmem_inode_unacct_blocks(inode, freed);
}
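
/*
 * Worked example (illustrative): alloced == 100, swapped == 20 and
 * nrpages == 70 gives freed == 100 - 20 - 70 == 10: ten undirtied hole
 * pages were dropped by reclaim behind our back, so alloced is trimmed
 * to 90 and ten blocks are unaccounted.
 */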

bool shmem_charge(struct inode *inode, long pages)
{
	struct address_space *mapping = inode->i_mapping;

	if (shmem_inode_acct_blocks(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	xa_lock_irq(&mapping->i_pages);
	mapping->nrpages += pages;
	xa_unlock_irq(&mapping->i_pages);

	shmem_recalc_inode(inode, pages, 0);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	/* pages argument is currently unused: keep it to help debugging */
	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	shmem_recalc_inode(inode, 0, 0);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking the folio is not enough: by the time a swapcache folio is locked,
 * it might be reused, and again be swapcache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
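
/*
 * Illustrative userspace sketch (not part of the kernel): the per-mount
 * huge= policy above is selected at mount time. The mount point and size
 * below are arbitrary examples.
 */
#if 0
#include <sys/mount.h>

static int mount_huge_tmpfs(void)
{
	/* Ask for huge pages only when they fall fully within i_size */
	return mount("tmpfs", "/mnt/hugetmp", "tmpfs", 0,
		     "size=1G,huge=within_size");
}
#endif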

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
					loff_t write_end, bool shmem_huge_force,
					struct vm_area_struct *vma,
					unsigned long vm_flags)
{
	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = max(write_end, i_size_read(inode));
		i_size = round_up(i_size, PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (mm && (vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
		   loff_t write_end, bool shmem_huge_force,
		   struct vm_area_struct *vma, unsigned long vm_flags)
{
	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;

	return __shmem_huge_global_enabled(inode, index, write_end,
					   shmem_huge_force, vma, vm_flags);
}

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_free)
{
	LIST_HEAD(list), *pos, *next;
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	unsigned long split = 0, freed = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &list) {
		pgoff_t next, end;
		loff_t i_size;
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_free && freed >= nr_to_free)
			goto move_back;

		i_size = i_size_read(inode);
		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
		if (!folio || xa_is_value(folio))
			goto drop;

		/* No large folio at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/* Check if there is anything to gain from splitting */
		next = folio_next_index(folio);
		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
		if (end <= folio->index || end >= next) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the folio at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		freed += next - end;
		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_free)
{
	return 0;
}

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
		loff_t write_end, bool shmem_huge_force,
		struct vm_area_struct *vma, unsigned long vm_flags)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Somewhat like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	gfp &= GFP_RECLAIM_MASK;
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
		if (expected != xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		if (expected && xas_find_conflict(&xas)) {
			xas_set_err(&xas, -EEXIST);
			goto unlock;
		}
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio))
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
		mapping->nrpages += nr;
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		folio->mapping = NULL;
		folio_ref_sub(folio, nr);
		return xas_error(&xas);
	}

	return 0;
}

/*
 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put_refs(folio, nr);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 * Returns the number of pages freed; 0 means the entry was not found in
 * the XArray (0 pages freed).
 */
static long shmem_free_swap(struct address_space *mapping,
			    pgoff_t index, void *radswap)
{
	int order = xa_get_order(&mapping->i_pages, index);
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return 0;
	free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);

	return 1 << order;
}
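
/*
 * Worked example (illustrative): if the swap entry at @index covers an
 * order-2 range, xa_get_order() reports 2 and a successful cmpxchg frees
 * 1 << 2 == 4 pages of swap in one call.
 */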

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;
	unsigned long max = end - 1;

	rcu_read_lock();
	xas_for_each(&xas, page, max) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped += 1 << xas_get_order(&xas);
		if (xas.xa_index == max)
			break;
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
					vma->vm_pgoff + vma_pages(vma));
}
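
/*
 * Worked example (illustrative, 4KiB pages): an object with 16 pages in
 * swap, mapped in full from offset 0, short-circuits to
 * 16 << PAGE_SHIFT == 65536 bytes without walking the XArray.
 */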

/*
 * SysV IPC SHM_UNLOCK restores unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping) &&
	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		check_move_unevictable_folios(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
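
/*
 * Illustrative userspace sketch (not part of the kernel): SysV SHM_LOCK
 * and SHM_UNLOCK drive the path above. The key and size are arbitrary
 * examples; SHM_LOCK may require CAP_IPC_LOCK or RLIMIT_MEMLOCK headroom.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>

static int shm_lock_demo(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0)
		return -1;
	shmctl(id, SHM_LOCK, NULL);	/* mark the segment unevictable */
	shmctl(id, SHM_UNLOCK, NULL);	/* ends up in shmem_unlock_mapping() */
	return shmctl(id, IPC_RMID, NULL);
}
#endif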

static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	/*
	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
	 * beyond i_size, and reports fallocated folios as holes.
	 */
	folio = filemap_get_entry(inode->i_mapping, index);
	if (!folio)
		return folio;
	if (!xa_is_value(folio)) {
		folio_lock(folio);
		if (folio->mapping == inode->i_mapping)
			return folio;
		/* The folio has been swapped out */
		folio_unlock(folio);
		folio_put(folio);
	}
	/*
	 * But read a folio back from swap if any of it is within i_size
	 * (although in some cases this is just a waste of time).
	 */
	folio = NULL;
	shmem_get_folio(inode, index, 0, &folio, SGP_READ);
	return folio;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio *folio;
	bool same_folio;
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
		info->fallocend = start;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += shmem_free_swap(mapping,
							indices[i], folio);
				continue;
			}

			if (!unfalloc || !folio_test_uptodate(folio))
				truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	/*
	 * When undoing a failed fallocate, we want none of the partial folio
	 * zeroing and splitting below, but shall want to truncate the whole
	 * folio when !uptodate indicates that it was added by this fallocate,
	 * even when [lstart, lend] covers only a part of the folio.
	 */
	if (unfalloc)
		goto whole_folios;

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
	if (folio) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio_next_index(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio)
		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
	if (folio) {
		folio_mark_dirty(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend))
			end = folio->index;
		folio_unlock(folio);
		folio_put(folio);
	}

whole_folios:

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];

			if (xa_is_value(folio)) {
				long swaps_freed;

				if (unfalloc)
					continue;
				swaps_freed = shmem_free_swap(mapping, indices[i], folio);
				if (!swaps_freed) {
					/* Swap was replaced by page: retry */
					index = indices[i];
					break;
				}
				nr_swaps_freed += swaps_freed;
				continue;
			}

			folio_lock(folio);

			if (!unfalloc || !folio_test_uptodate(folio)) {
				if (folio_mapping(folio) != mapping) {
					/* Page was replaced by swap: retry */
					folio_unlock(folio);
					index = indices[i];
					break;
				}
				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
						folio);

				if (!folio_test_large(folio)) {
					truncate_inode_folio(mapping, folio);
				} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
					/*
					 * If we split a page, reset the loop so
					 * that we pick up the new sub pages.
					 * Otherwise the THP was entirely
					 * dropped or the target range was
					 * zeroed, so just continue the loop as
					 * is.
					 */
					if (!folio_test_large(folio)) {
						folio_unlock(folio);
						index = start;
						break;
					}
				}
			}
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
	}

	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages)
		shmem_recalc_inode(inode, 0, 0);

	if (info->fsflags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (info->fsflags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (info->fsflags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	stat->attributes_mask |= (STATX_ATTR_APPEND |
			STATX_ATTR_IMMUTABLE |
			STATX_ATTR_NODUMP);
	generic_fillattr(idmap, request_mask, inode, stat);

	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
		stat->blksize = HPAGE_PMD_SIZE;

	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = info->i_crtime.tv_sec;
		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
	}

	return 0;
}
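
/*
 * Illustrative userspace sketch (not part of the kernel): tmpfs fills in
 * STATX_BTIME from the inode's creation time, as implemented above. The
 * path is an arbitrary example.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/stat.h>		/* statx(), struct statx */
#include <stdio.h>

static void show_btime(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/dev/shm/somefile", 0, STATX_BTIME, &stx) == 0 &&
	    (stx.stx_mask & STATX_BTIME))
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
}
#endif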

static int shmem_setattr(struct mnt_idmap *idmap,
			 struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int error;
	bool update_mtime = false;
	bool update_ctime = true;

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
		if ((inode->i_mode ^ attr->ia_mode) & 0111) {
			return -EPERM;
		}
	}

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			update_mtime = true;
		} else {
			update_ctime = false;
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
		}
	}

	if (is_quota_modification(idmap, inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}

	/* Transfer quota accounting */
	if (i_uid_needs_update(idmap, attr, inode) ||
	    i_gid_needs_update(idmap, attr, inode)) {
		error = dquot_transfer(idmap, inode, attr);
		if (error)
			return error;
	}

	setattr_copy(idmap, inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
	if (!error && update_ctime) {
		inode_set_ctime_current(inode);
		if (update_mtime)
			inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
		inode_inc_iversion(inode);
	}
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	size_t freed = 0;

	if (shmem_mapping(inode->i_mapping)) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		mapping_set_exiting(inode->i_mapping);
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		while (!list_empty(&info->swaplist)) {
			/* Wait while shmem_unuse() is scanning this inode... */
			wait_var_event(&info->stop_eviction,
				       !atomic_read(&info->stop_eviction));
			mutex_lock(&shmem_swaplist_mutex);
			/* ...but beware of the race if we peeked too early */
			if (!atomic_read(&info->stop_eviction))
				list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
	shmem_free_inode(inode->i_sb, freed);
	WARN_ON(inode->i_blocks);
	clear_inode(inode);
#ifdef CONFIG_TMPFS_QUOTA
	dquot_free_inode(inode);
	dquot_drop(inode);
#endif
}

static int shmem_find_swap_entries(struct address_space *mapping,
				   pgoff_t start, struct folio_batch *fbatch,
				   pgoff_t *indices, unsigned int type)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;
	swp_entry_t entry;

	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (xas_retry(&xas, folio))
			continue;

		if (!xa_is_value(folio))
			continue;

		entry = radix_to_swp_entry(folio);
		/*
		 * swapin error entries can be found in the mapping. But they're
		 * deliberately ignored here as we've done everything we can do.
		 */
		if (swp_type(entry) != type)
			continue;

		indices[folio_batch_count(fbatch)] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return xas.xa_index;
}

/*
 * Move the swapped pages for an inode to page cache. Returns the count
 * of pages swapped in, or the error in case of failure.
 */
static int shmem_unuse_swap_entries(struct inode *inode,
		struct folio_batch *fbatch, pgoff_t *indices)
{
	int i = 0;
	int ret = 0;
	int error = 0;
	struct address_space *mapping = inode->i_mapping;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		if (!xa_is_value(folio))
			continue;
		error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
					mapping_gfp_mask(mapping), NULL, NULL);
		if (error == 0) {
			folio_unlock(folio);
			folio_put(folio);
			ret++;
		}
		if (error == -ENOMEM)
			break;
		error = 0;
	}
	return error ? error : ret;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start = 0;
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	int ret = 0;

	do {
		folio_batch_init(&fbatch);
		shmem_find_swap_entries(mapping, start, &fbatch, indices, type);
		if (folio_batch_count(&fbatch) == 0) {
			ret = 0;
			break;
		}

		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
		if (ret < 0)
			break;

		start = indices[folio_batch_count(&fbatch) - 1];
	} while (true);

	return ret;
}

/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
 */
int shmem_unuse(unsigned int type)
{
	struct shmem_inode_info *info, *next;
	int error = 0;

	if (list_empty(&shmem_swaplist))
		return 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
		if (!info->swapped) {
			list_del_init(&info->swaplist);
			continue;
		}
		/*
		 * Drop the swaplist mutex while searching the inode for swap;
		 * but before doing so, make sure shmem_evict_inode() will not
		 * remove placeholder inode from swaplist, nor let it be freed
		 * (igrab() would protect from unlink, but not from unmount).
		 */
		atomic_inc(&info->stop_eviction);
		mutex_unlock(&shmem_swaplist_mutex);

		error = shmem_unuse_inode(&info->vfs_inode, type);
		cond_resched();

		mutex_lock(&shmem_swaplist_mutex);
		next = list_next_entry(info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		if (atomic_dec_and_test(&info->stop_eviction))
			wake_up_var(&info->stop_eviction);
		if (error)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	return error;
}
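
/*
 * Note (illustrative): this is the path taken by swapoff(2); try_to_unuse()
 * calls shmem_unuse() with the departing swap type so that every tmpfs page
 * on that device is read back before the device is released.
 */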

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	swp_entry_t swap;
	pgoff_t index;
	int nr_pages;
	bool split = false;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (WARN_ON_ONCE(!wbc->for_reclaim))
		goto redirty;

	if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap))
		goto redirty;

	if (!total_swap_pages)
		goto redirty;

	/*
	 * If CONFIG_THP_SWAP is not enabled, the large folio should be
	 * split when swapping.
	 *
	 * And shrinkage of pages beyond i_size does not split swap, so
	 * swapout of a large folio crossing i_size needs to split too
	 * (unless fallocate has been used to preallocate beyond EOF).
	 */
	if (folio_test_large(folio)) {
		index = shmem_fallocend(inode,
			DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
		if ((index > folio->index && index < folio_next_index(folio)) ||
		    !IS_ENABLED(CONFIG_THP_SWAP))
			split = true;
	}

	if (split) {
try_split:
		/* Ensure the subpages are still dirty */
		folio_test_set_dirty(folio);
		if (split_huge_page_to_list_to_order(page, wbc->list, 0))
			goto redirty;
		folio = page_folio(page);
		folio_clear_dirty(folio);
	}

	index = folio->index;
	nr_pages = folio_nr_pages(folio);

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated folio arriving here is now to initialize it and write it.
	 *
	 * That's okay for a folio already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this folio in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the folio, and let shmem_fallocate() quit when too many.
	 */
	if (!folio_test_uptodate(folio)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		folio_mark_uptodate(folio);
	}

	swap = folio_alloc_swap(folio);
	if (!swap.val) {
		if (nr_pages > 1)
			goto try_split;

		goto redirty;
	}

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the folio is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(folio, swap,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			NULL) == 0) {
		shmem_recalc_inode(inode, 0, nr_pages);
		swap_shmem_alloc(swap, nr_pages);
		shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(folio_mapped(folio));
		return swap_writepage(&folio->page, wbc);
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_folio(folio, swap);
redirty:
	folio_mark_dirty(folio);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
	folio_unlock(folio);
	return 0;
}

#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		raw_spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */

static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
			pgoff_t index, unsigned int order, pgoff_t *ilx);

static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
	mpol_cond_put(mpol);

	return folio;
}

/*
 * Make sure huge_gfp is always more limited than limit_gfp.
 * Some of the flags set permissions, while others set limitations.
 */
static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
{
	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);

	/* Allow allocations only from the originally specified zones. */
	result |= zoneflags;

	/*
	 * Minimize the result gfp by taking the union with the deny flags,
	 * and the intersection of the allow flags.
	 */
	result |= (limit_gfp & denyflags);
	result |= (huge_gfp & limit_gfp) & allowflags;

	return result;
}
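
/*
 * Worked example (illustrative): if limit_gfp lacks __GFP_FS, the
 * intersection with allowflags strips __GFP_FS from the result no matter
 * what huge_gfp asked for, while limit_gfp's __GFP_NOWARN/__GFP_NORETRY
 * deny flags are forced on: the huge attempt can never be more permissive
 * than the order-0 fallback.
 */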
1654 
1655 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
shmem_allowable_huge_orders(struct inode * inode,struct vm_area_struct * vma,pgoff_t index,loff_t write_end,bool shmem_huge_force)1656 unsigned long shmem_allowable_huge_orders(struct inode *inode,
1657 				struct vm_area_struct *vma, pgoff_t index,
1658 				loff_t write_end, bool shmem_huge_force)
1659 {
1660 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
1661 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
1662 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
1663 	bool global_huge;
1664 	loff_t i_size;
1665 	int order;
1666 
1667 	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
1668 		return 0;
1669 
1670 	global_huge = shmem_huge_global_enabled(inode, index, write_end,
1671 					shmem_huge_force, vma, vm_flags);
1672 	if (!vma || !vma_is_anon_shmem(vma)) {
1673 		/*
1674 		 * For tmpfs, we now only support PMD sized THP if huge page
1675 		 * is enabled, otherwise fallback to order 0.
1676 		 * is enabled, otherwise fall back to order 0.
1677 		return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
1678 	}
1679 
1680 	/*
1681 	 * Following the 'deny' semantics of the top level, force the huge
1682 	 * option off for all mounts.
1683 	 */
1684 	if (shmem_huge == SHMEM_HUGE_DENY)
1685 		return 0;
1686 
1687 	/*
1688 	 * Only allow inherit orders if the top-level value is 'force', which
1689 	 * means non-PMD sized THP cannot override the 'huge' mount option now.
1690 	 */
1691 	if (shmem_huge == SHMEM_HUGE_FORCE)
1692 		return READ_ONCE(huge_shmem_orders_inherit);
1693 
1694 	/* Allow mTHP that will be fully within i_size. */
1695 	order = highest_order(within_size_orders);
1696 	while (within_size_orders) {
1697 		index = round_up(index + 1, 1 << order);
1698 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
1699 		if (i_size >> PAGE_SHIFT >= index) {
1700 			mask |= within_size_orders;
1701 			break;
1702 		}
1703 
1704 		order = next_order(&within_size_orders, order);
1705 	}
1706 
1707 	if (vm_flags & VM_HUGEPAGE)
1708 		mask |= READ_ONCE(huge_shmem_orders_madvise);
1709 
1710 	if (global_huge)
1711 		mask |= READ_ONCE(huge_shmem_orders_inherit);
1712 
1713 	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
1714 }
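
/*
 * To summarize the above: the returned bitmap is
 * THP_ORDERS_ALL_FILE_DEFAULT masked by the union of the per-order
 * settings - 'always' orders unconditionally, 'within_size' orders
 * when the folio would sit fully within i_size, 'madvise' orders when
 * the vma has VM_HUGEPAGE, and 'inherit' orders when the global or
 * mount 'huge' setting enables huge pages for this inode.
 */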
1715 
1716 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1717 					   struct address_space *mapping, pgoff_t index,
1718 					   unsigned long orders)
1719 {
1720 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1721 	pgoff_t aligned_index;
1722 	unsigned long pages;
1723 	int order;
1724 
1725 	if (vma) {
1726 		orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1727 		if (!orders)
1728 			return 0;
1729 	}
1730 
1731 	/* Find the highest order that can be added to the page cache */
1732 	order = highest_order(orders);
1733 	while (orders) {
1734 		pages = 1UL << order;
1735 		aligned_index = round_down(index, pages);
1736 		/*
1737 		 * Check for conflict before waiting on a huge allocation.
1738 		 * Conflict might be that a huge page has just been allocated
1739 		 * and added to page cache by a racing thread, or that there
1740 		 * is already at least one small page in the huge extent.
1741 		 * Be careful to retry when appropriate, but not forever!
1742 		 * Elsewhere -EEXIST would be the right code, but not here.
1743 		 */
1744 		if (!xa_find(&mapping->i_pages, &aligned_index,
1745 			     aligned_index + pages - 1, XA_PRESENT))
1746 			break;
1747 		order = next_order(&orders, order);
1748 	}
1749 
1750 	return orders;
1751 }
1752 #else
1753 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1754 					   struct address_space *mapping, pgoff_t index,
1755 					   unsigned long orders)
1756 {
1757 	return 0;
1758 }
1759 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1760 
1761 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1762 		struct shmem_inode_info *info, pgoff_t index)
1763 {
1764 	struct mempolicy *mpol;
1765 	pgoff_t ilx;
1766 	struct folio *folio;
1767 
1768 	mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1769 	folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1770 	mpol_cond_put(mpol);
1771 
1772 	return folio;
1773 }
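
/*
 * Note that shmem_alloc_folio() allocates according to the inode's
 * shared NUMA mempolicy: the ilx index from shmem_get_pgoff_policy()
 * biases interleaving by inode number and order-adjusted offset (see
 * the comments above shmem_get_policy() below).
 */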
1774 
1775 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1776 		gfp_t gfp, struct inode *inode, pgoff_t index,
1777 		struct mm_struct *fault_mm, unsigned long orders)
1778 {
1779 	struct address_space *mapping = inode->i_mapping;
1780 	struct shmem_inode_info *info = SHMEM_I(inode);
1781 	unsigned long suitable_orders = 0;
1782 	struct folio *folio = NULL;
1783 	long pages;
1784 	int error, order;
1785 
1786 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1787 		orders = 0;
1788 
1789 	if (orders > 0) {
1790 		suitable_orders = shmem_suitable_orders(inode, vmf,
1791 							mapping, index, orders);
1792 
1793 		order = highest_order(suitable_orders);
1794 		while (suitable_orders) {
1795 			pages = 1UL << order;
1796 			index = round_down(index, pages);
1797 			folio = shmem_alloc_folio(gfp, order, info, index);
1798 			if (folio)
1799 				goto allocated;
1800 
1801 			if (pages == HPAGE_PMD_NR)
1802 				count_vm_event(THP_FILE_FALLBACK);
1803 			count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1804 			order = next_order(&suitable_orders, order);
1805 		}
1806 	} else {
1807 		pages = 1;
1808 		folio = shmem_alloc_folio(gfp, 0, info, index);
1809 	}
1810 	if (!folio)
1811 		return ERR_PTR(-ENOMEM);
1812 
1813 allocated:
1814 	__folio_set_locked(folio);
1815 	__folio_set_swapbacked(folio);
1816 
1817 	gfp &= GFP_RECLAIM_MASK;
1818 	error = mem_cgroup_charge(folio, fault_mm, gfp);
1819 	if (error) {
1820 		if (xa_find(&mapping->i_pages, &index,
1821 				index + pages - 1, XA_PRESENT)) {
1822 			error = -EEXIST;
1823 		} else if (pages > 1) {
1824 			if (pages == HPAGE_PMD_NR) {
1825 				count_vm_event(THP_FILE_FALLBACK);
1826 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
1827 			}
1828 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1829 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1830 		}
1831 		goto unlock;
1832 	}
1833 
1834 	error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1835 	if (error)
1836 		goto unlock;
1837 
1838 	error = shmem_inode_acct_blocks(inode, pages);
1839 	if (error) {
1840 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1841 		long freed;
1842 		/*
1843 		 * Try to reclaim some space by splitting a few
1844 		 * large folios beyond i_size on the filesystem.
1845 		 */
1846 		shmem_unused_huge_shrink(sbinfo, NULL, pages);
1847 		/*
1848 		 * And do a shmem_recalc_inode() to account for freed pages:
1849 		 * except our folio is there in cache, so not quite balanced.
1850 		 */
1851 		spin_lock(&info->lock);
1852 		freed = pages + info->alloced - info->swapped -
1853 			READ_ONCE(mapping->nrpages);
1854 		if (freed > 0)
1855 			info->alloced -= freed;
1856 		spin_unlock(&info->lock);
1857 		if (freed > 0)
1858 			shmem_inode_unacct_blocks(inode, freed);
1859 		error = shmem_inode_acct_blocks(inode, pages);
1860 		if (error) {
1861 			filemap_remove_folio(folio);
1862 			goto unlock;
1863 		}
1864 	}
1865 
1866 	shmem_recalc_inode(inode, pages, 0);
1867 	folio_add_lru(folio);
1868 	return folio;
1869 
1870 unlock:
1871 	folio_unlock(folio);
1872 	folio_put(folio);
1873 	return ERR_PTR(error);
1874 }
1875 
1876 /*
1877  * When a page is moved from swapcache to shmem filecache (either by the
1878  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
1879  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1880  * ignorance of the mapping it belongs to.  If that mapping has special
1881  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1882  * we may need to copy to a suitable page before moving to filecache.
1883  *
1884  * In a future release, this may well be extended to respect cpuset and
1885  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1886  * but for now it is a simple matter of zone.
1887  */
1888 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
1889 {
1890 	return folio_zonenum(folio) > gfp_zone(gfp);
1891 }
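
/*
 * Example (hypothetical gfp): a mapping limited to __GFP_DMA32, as for
 * the gma500 case above, has gfp_zone() = ZONE_DMA32; a folio that
 * swapin happened to place in ZONE_NORMAL has a higher folio_zonenum(),
 * so the check above succeeds and shmem_replace_folio() copies it to a
 * folio allocated below 4GB.
 */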
1892 
1893 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
1894 				struct shmem_inode_info *info, pgoff_t index,
1895 				struct vm_area_struct *vma)
1896 {
1897 	struct folio *new, *old = *foliop;
1898 	swp_entry_t entry = old->swap;
1899 	struct address_space *swap_mapping = swap_address_space(entry);
1900 	pgoff_t swap_index = swap_cache_index(entry);
1901 	XA_STATE(xas, &swap_mapping->i_pages, swap_index);
1902 	int nr_pages = folio_nr_pages(old);
1903 	int error = 0, i;
1904 
1905 	/*
1906 	 * We have arrived here because our zones are constrained, so don't
1907 	 * limit chance of success by further cpuset and node constraints.
1908 	 */
1909 	gfp &= ~GFP_CONSTRAINT_MASK;
1910 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1911 	if (nr_pages > 1) {
1912 		gfp_t huge_gfp = vma_thp_gfp_mask(vma);
1913 
1914 		gfp = limit_gfp_mask(huge_gfp, gfp);
1915 	}
1916 #endif
1917 
1918 	new = shmem_alloc_folio(gfp, folio_order(old), info, index);
1919 	if (!new)
1920 		return -ENOMEM;
1921 
1922 	folio_ref_add(new, nr_pages);
1923 	folio_copy(new, old);
1924 	flush_dcache_folio(new);
1925 
1926 	__folio_set_locked(new);
1927 	__folio_set_swapbacked(new);
1928 	folio_mark_uptodate(new);
1929 	new->swap = entry;
1930 	folio_set_swapcache(new);
1931 
1932 	/* Swap cache still stores N entries instead of a high-order entry */
1933 	xa_lock_irq(&swap_mapping->i_pages);
1934 	for (i = 0; i < nr_pages; i++) {
1935 		void *item = xas_load(&xas);
1936 
1937 		if (item != old) {
1938 			error = -ENOENT;
1939 			break;
1940 		}
1941 
1942 		xas_store(&xas, new);
1943 		xas_next(&xas);
1944 	}
1945 	if (!error) {
1946 		mem_cgroup_replace_folio(old, new);
1947 		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
1948 		__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
1949 		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
1950 		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
1951 	}
1952 	xa_unlock_irq(&swap_mapping->i_pages);
1953 
1954 	if (unlikely(error)) {
1955 		/*
1956 		 * Is this possible?  I think not, now that our callers
1957 		 * check both the swapcache flag and folio->private
1958 		 * after getting the folio lock; but be defensive.
1959 		 * Point 'old' at the new folio, which is then cleared and freed.
1960 		 */
1961 		old = new;
1962 	} else {
1963 		folio_add_lru(new);
1964 		*foliop = new;
1965 	}
1966 
1967 	folio_clear_swapcache(old);
1968 	old->private = NULL;
1969 
1970 	folio_unlock(old);
1971 	/*
1972 	 * The old folio has been removed from the swap cache: drop its
1973 	 * 'nr_pages' references, as well as the one temporary reference
1974 	 * taken from the swap cache.
1975 	 */
1976 	folio_put_refs(old, nr_pages + 1);
1977 	return error;
1978 }
1979 
1980 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
1981 					 struct folio *folio, swp_entry_t swap)
1982 {
1983 	struct address_space *mapping = inode->i_mapping;
1984 	swp_entry_t swapin_error;
1985 	void *old;
1986 	int nr_pages;
1987 
1988 	swapin_error = make_poisoned_swp_entry();
1989 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
1990 			     swp_to_radix_entry(swap),
1991 			     swp_to_radix_entry(swapin_error), 0);
1992 	if (old != swp_to_radix_entry(swap))
1993 		return;
1994 
1995 	nr_pages = folio_nr_pages(folio);
1996 	folio_wait_writeback(folio);
1997 	delete_from_swap_cache(folio);
1998 	/*
1999 	 * Don't treat a swapin error folio as alloced. Otherwise inode->i_blocks
2000 	 * won't be 0 when the inode is released, which would trigger the
2001 	 * WARN_ON(i_blocks) in shmem_evict_inode().
2002 	 */
2003 	shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2004 	swap_free_nr(swap, nr_pages);
2005 }
2006 
2007 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2008 				   swp_entry_t swap, gfp_t gfp)
2009 {
2010 	struct address_space *mapping = inode->i_mapping;
2011 	XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2012 	void *alloced_shadow = NULL;
2013 	int alloced_order = 0, i;
2014 
2015 	/* Convert user data gfp flags to xarray node gfp flags */
2016 	gfp &= GFP_RECLAIM_MASK;
2017 
2018 	for (;;) {
2019 		int order = -1, split_order = 0;
2020 		void *old = NULL;
2021 
2022 		xas_lock_irq(&xas);
2023 		old = xas_load(&xas);
2024 		if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2025 			xas_set_err(&xas, -EEXIST);
2026 			goto unlock;
2027 		}
2028 
2029 		order = xas_get_order(&xas);
2030 
2031 		/* Swap entry may have changed before we re-acquire the lock */
2032 		if (alloced_order &&
2033 		    (old != alloced_shadow || order != alloced_order)) {
2034 			xas_destroy(&xas);
2035 			alloced_order = 0;
2036 		}
2037 
2038 		/* Try to split large swap entry in pagecache */
2039 		if (order > 0) {
2040 			if (!alloced_order) {
2041 				split_order = order;
2042 				goto unlock;
2043 			}
2044 			xas_split(&xas, old, order);
2045 
2046 			/*
2047 			 * Re-set the swap entries after splitting: the original
2048 			 * large entry's swap offsets are continuous from its base.
2049 			 */
2050 			for (i = 0; i < 1 << order; i++) {
2051 				pgoff_t aligned_index = round_down(index, 1 << order);
2052 				swp_entry_t tmp;
2053 
2054 				tmp = swp_entry(swp_type(swap), swp_offset(swap) + i);
2055 				__xa_store(&mapping->i_pages, aligned_index + i,
2056 					   swp_to_radix_entry(tmp), 0);
2057 			}
2058 		}
2059 
2060 unlock:
2061 		xas_unlock_irq(&xas);
2062 
2063 		/* split needed, alloc here and retry. */
2064 		if (split_order) {
2065 			xas_split_alloc(&xas, old, split_order, gfp);
2066 			if (xas_error(&xas))
2067 				goto error;
2068 			alloced_shadow = old;
2069 			alloced_order = split_order;
2070 			xas_reset(&xas);
2071 			continue;
2072 		}
2073 
2074 		if (!xas_nomem(&xas, gfp))
2075 			break;
2076 	}
2077 
2078 error:
2079 	if (xas_error(&xas))
2080 		return xas_error(&xas);
2081 
2082 	return alloced_order;
2083 }
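
/*
 * For instance (illustrative numbers): if indices 64..79 were covered
 * by one order-4 swap entry with base offset S, the split replaces it
 * with 16 order-0 entries whose offsets are S..S+15, and returns 4 so
 * that the caller can recompute the swap entry for its own index.
 */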
2084 
2085 /*
2086  * Swap in the folio pointed to by *foliop.
2087  * Caller has to make sure that *foliop contains a valid swapped folio.
2088  * Returns 0 with the folio in *foliop on success. On failure, returns the
2089  * error code and NULL in *foliop.
2090  */
2091 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2092 			     struct folio **foliop, enum sgp_type sgp,
2093 			     gfp_t gfp, struct vm_area_struct *vma,
2094 			     vm_fault_t *fault_type)
2095 {
2096 	struct address_space *mapping = inode->i_mapping;
2097 	struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2098 	struct shmem_inode_info *info = SHMEM_I(inode);
2099 	struct swap_info_struct *si;
2100 	struct folio *folio = NULL;
2101 	swp_entry_t swap;
2102 	int error, nr_pages;
2103 
2104 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2105 	swap = radix_to_swp_entry(*foliop);
2106 	*foliop = NULL;
2107 
2108 	if (is_poisoned_swp_entry(swap))
2109 		return -EIO;
2110 
2111 	si = get_swap_device(swap);
2112 	if (!si) {
2113 		if (!shmem_confirm_swap(mapping, index, swap))
2114 			return -EEXIST;
2115 		else
2116 			return -EINVAL;
2117 	}
2118 
2119 	/* Look it up and read it in... */
2120 	folio = swap_cache_get_folio(swap, NULL, 0);
2121 	if (!folio) {
2122 		int split_order;
2123 
2124 		/* Or update major stats only when swapin succeeds?? */
2125 		if (fault_type) {
2126 			*fault_type |= VM_FAULT_MAJOR;
2127 			count_vm_event(PGMAJFAULT);
2128 			count_memcg_event_mm(fault_mm, PGMAJFAULT);
2129 		}
2130 
2131 		/*
2132 		 * The swap device can currently only swap in order-0 folios,
2133 		 * so split any large swap entry stored in the pagecache
2134 		 * if necessary.
2135 		 */
2136 		split_order = shmem_split_large_entry(inode, index, swap, gfp);
2137 		if (split_order < 0) {
2138 			error = split_order;
2139 			goto failed;
2140 		}
2141 
2142 		/*
2143 		 * If the large swap entry has already been split, recalculate
2144 		 * the swap entry for this index from the alignment of the
2145 		 * old (pre-split) order.
2146 		 */
2147 		if (split_order > 0) {
2148 			pgoff_t offset = index - round_down(index, 1 << split_order);
2149 
2150 			swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2151 		}
2152 
2153 		/* Here we actually start the io */
2154 		folio = shmem_swapin_cluster(swap, gfp, info, index);
2155 		if (!folio) {
2156 			error = -ENOMEM;
2157 			goto failed;
2158 		}
2159 	}
2160 
2161 	/* We have to do this with folio locked to prevent races */
2162 	folio_lock(folio);
2163 	if (!folio_test_swapcache(folio) ||
2164 	    folio->swap.val != swap.val ||
2165 	    !shmem_confirm_swap(mapping, index, swap)) {
2166 		error = -EEXIST;
2167 		goto unlock;
2168 	}
2169 	if (!folio_test_uptodate(folio)) {
2170 		error = -EIO;
2171 		goto failed;
2172 	}
2173 	folio_wait_writeback(folio);
2174 	nr_pages = folio_nr_pages(folio);
2175 
2176 	/*
2177 	 * Some architectures may have to restore extra metadata to the
2178 	 * folio after reading from swap.
2179 	 */
2180 	arch_swap_restore(folio_swap(swap, folio), folio);
2181 
2182 	if (shmem_should_replace_folio(folio, gfp)) {
2183 		error = shmem_replace_folio(&folio, gfp, info, index, vma);
2184 		if (error)
2185 			goto failed;
2186 	}
2187 
2188 	error = shmem_add_to_page_cache(folio, mapping,
2189 					round_down(index, nr_pages),
2190 					swp_to_radix_entry(swap), gfp);
2191 	if (error)
2192 		goto failed;
2193 
2194 	shmem_recalc_inode(inode, 0, -nr_pages);
2195 
2196 	if (sgp == SGP_WRITE)
2197 		folio_mark_accessed(folio);
2198 
2199 	delete_from_swap_cache(folio);
2200 	folio_mark_dirty(folio);
2201 	swap_free_nr(swap, nr_pages);
2202 	put_swap_device(si);
2203 
2204 	*foliop = folio;
2205 	return 0;
2206 failed:
2207 	if (!shmem_confirm_swap(mapping, index, swap))
2208 		error = -EEXIST;
2209 	if (error == -EIO)
2210 		shmem_set_folio_swapin_error(inode, index, folio, swap);
2211 unlock:
2212 	if (folio) {
2213 		folio_unlock(folio);
2214 		folio_put(folio);
2215 	}
2216 	put_swap_device(si);
2217 
2218 	return error;
2219 }
2220 
2221 /*
2222  * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2223  *
2224  * If we allocate a new one we do not mark it dirty. That's up to the
2225  * vm. If we swap it in we mark it dirty, since we also free the swap
2226  * entry: a page cannot live in both the swap and page caches.
2227  *
2228  * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2229  */
2230 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2231 		loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2232 		gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2233 {
2234 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2235 	struct mm_struct *fault_mm;
2236 	struct folio *folio;
2237 	int error;
2238 	bool alloced;
2239 	unsigned long orders = 0;
2240 
2241 	if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2242 		return -EINVAL;
2243 
2244 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2245 		return -EFBIG;
2246 repeat:
2247 	if (sgp <= SGP_CACHE &&
2248 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2249 		return -EINVAL;
2250 
2251 	alloced = false;
2252 	fault_mm = vma ? vma->vm_mm : NULL;
2253 
2254 	folio = filemap_get_entry(inode->i_mapping, index);
2255 	if (folio && vma && userfaultfd_minor(vma)) {
2256 		if (!xa_is_value(folio))
2257 			folio_put(folio);
2258 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2259 		return 0;
2260 	}
2261 
2262 	if (xa_is_value(folio)) {
2263 		error = shmem_swapin_folio(inode, index, &folio,
2264 					   sgp, gfp, vma, fault_type);
2265 		if (error == -EEXIST)
2266 			goto repeat;
2267 
2268 		*foliop = folio;
2269 		return error;
2270 	}
2271 
2272 	if (folio) {
2273 		folio_lock(folio);
2274 
2275 		/* Has the folio been truncated or swapped out? */
2276 		if (unlikely(folio->mapping != inode->i_mapping)) {
2277 			folio_unlock(folio);
2278 			folio_put(folio);
2279 			goto repeat;
2280 		}
2281 		if (sgp == SGP_WRITE)
2282 			folio_mark_accessed(folio);
2283 		if (folio_test_uptodate(folio))
2284 			goto out;
2285 		/* fallocated folio */
2286 		if (sgp != SGP_READ)
2287 			goto clear;
2288 		folio_unlock(folio);
2289 		folio_put(folio);
2290 	}
2291 
2292 	/*
2293 	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2294 	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2295 	 */
2296 	*foliop = NULL;
2297 	if (sgp == SGP_READ)
2298 		return 0;
2299 	if (sgp == SGP_NOALLOC)
2300 		return -ENOENT;
2301 
2302 	/*
2303 	 * Fast cache lookup and swap lookup did not find it: allocate.
2304 	 */
2305 
2306 	if (vma && userfaultfd_missing(vma)) {
2307 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2308 		return 0;
2309 	}
2310 
2311 	/* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2312 	orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2313 	if (orders > 0) {
2314 		gfp_t huge_gfp;
2315 
2316 		huge_gfp = vma_thp_gfp_mask(vma);
2317 		huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2318 		folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2319 				inode, index, fault_mm, orders);
2320 		if (!IS_ERR(folio)) {
2321 			if (folio_test_pmd_mappable(folio))
2322 				count_vm_event(THP_FILE_ALLOC);
2323 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2324 			goto alloced;
2325 		}
2326 		if (PTR_ERR(folio) == -EEXIST)
2327 			goto repeat;
2328 	}
2329 
2330 	folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2331 	if (IS_ERR(folio)) {
2332 		error = PTR_ERR(folio);
2333 		if (error == -EEXIST)
2334 			goto repeat;
2335 		folio = NULL;
2336 		goto unlock;
2337 	}
2338 
2339 alloced:
2340 	alloced = true;
2341 	if (folio_test_large(folio) &&
2342 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2343 					folio_next_index(folio)) {
2344 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2345 		struct shmem_inode_info *info = SHMEM_I(inode);
2346 		/*
2347 		 * Part of the large folio is beyond i_size: subject
2348 		 * to shrink under memory pressure.
2349 		 */
2350 		spin_lock(&sbinfo->shrinklist_lock);
2351 		/*
2352 		 * list_empty_careful() used to defend against unlocked access to
2353 		 * ->shrink_list in shmem_unused_huge_shrink()
2354 		 */
2355 		if (list_empty_careful(&info->shrinklist)) {
2356 			list_add_tail(&info->shrinklist,
2357 				      &sbinfo->shrinklist);
2358 			sbinfo->shrinklist_len++;
2359 		}
2360 		spin_unlock(&sbinfo->shrinklist_lock);
2361 	}
2362 
2363 	if (sgp == SGP_WRITE)
2364 		folio_set_referenced(folio);
2365 	/*
2366 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2367 	 */
2368 	if (sgp == SGP_FALLOC)
2369 		sgp = SGP_WRITE;
2370 clear:
2371 	/*
2372 	 * Let SGP_WRITE caller clear ends if write does not fill folio;
2373 	 * but SGP_FALLOC on a folio fallocated earlier must initialize
2374 	 * it now, lest undo on failure cancel our earlier guarantee.
2375 	 */
2376 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2377 		long i, n = folio_nr_pages(folio);
2378 
2379 		for (i = 0; i < n; i++)
2380 			clear_highpage(folio_page(folio, i));
2381 		flush_dcache_folio(folio);
2382 		folio_mark_uptodate(folio);
2383 	}
2384 
2385 	/* Perhaps the file has been truncated since we checked */
2386 	if (sgp <= SGP_CACHE &&
2387 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2388 		error = -EINVAL;
2389 		goto unlock;
2390 	}
2391 out:
2392 	*foliop = folio;
2393 	return 0;
2394 
2395 	/*
2396 	 * Error recovery.
2397 	 */
2398 unlock:
2399 	if (alloced)
2400 		filemap_remove_folio(folio);
2401 	shmem_recalc_inode(inode, 0, 0);
2402 	if (folio) {
2403 		folio_unlock(folio);
2404 		folio_put(folio);
2405 	}
2406 	return error;
2407 }
2408 
2409 /**
2410  * shmem_get_folio - find and lock a shmem folio.
2411  * @inode:	inode to search
2412  * @index:	the page index.
2413  * @write_end:	end of a write, could extend inode size
2414  * @foliop:	pointer to the folio if found
2415  * @sgp:	SGP_* flags to control behavior
2416  *
2417  * Looks up the page cache entry at @inode & @index.  If a folio is
2418  * present, it is returned locked with an increased refcount.
2419  *
2420  * If the caller modifies data in the folio, it must call folio_mark_dirty()
2421  * before unlocking the folio to ensure that the folio is not reclaimed.
2422  * There is no need to reserve space before calling folio_mark_dirty().
2423  *
2424  * When no folio is found, the behavior depends on @sgp:
2425  *  - for SGP_READ, *@foliop is %NULL and 0 is returned
2426  *  - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2427  *  - for all other flags a new folio is allocated, inserted into the
2428  *    page cache and returned locked in @foliop.
2429  *
2430  * Context: May sleep.
2431  * Return: 0 if successful, else a negative error code.
2432  */
2433 int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2434 		    struct folio **foliop, enum sgp_type sgp)
2435 {
2436 	return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2437 			mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2438 }
2439 EXPORT_SYMBOL_GPL(shmem_get_folio);
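
/*
 * Usage sketch (illustrative only, error handling abbreviated):
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, 0, &folio, SGP_CACHE);
 *	if (!err) {
 *		... access the folio contents ...
 *		folio_mark_dirty(folio);	(only if modified)
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 */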
2440 
2441 /*
2442  * This is like autoremove_wake_function, but it removes the wait queue
2443  * entry unconditionally - even if something else had already woken the
2444  * target.
2445  */
2446 static int synchronous_wake_function(wait_queue_entry_t *wait,
2447 			unsigned int mode, int sync, void *key)
2448 {
2449 	int ret = default_wake_function(wait, mode, sync, key);
2450 	list_del_init(&wait->entry);
2451 	return ret;
2452 }
2453 
2454 /*
2455  * Trinity finds that probing a hole which tmpfs is punching can
2456  * prevent the hole-punch from ever completing: which in turn
2457  * locks writers out with its hold on i_rwsem.  So refrain from
2458  * faulting pages into the hole while it's being punched.  Although
2459  * shmem_undo_range() does remove the additions, it may be unable to
2460  * keep up, as each new page needs its own unmap_mapping_range() call,
2461  * and the i_mmap tree grows ever slower to scan if new vmas are added.
2462  *
2463  * It does not matter if we sometimes reach this check just before the
2464  * hole-punch begins, so that one fault then races with the punch:
2465  * we just need to make racing faults a rare case.
2466  *
2467  * The implementation below would be much simpler if we just used a
2468  * standard mutex or completion: but we cannot take i_rwsem in fault,
2469  * and bloating every shmem inode for this unlikely case would be sad.
2470  */
2471 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2472 {
2473 	struct shmem_falloc *shmem_falloc;
2474 	struct file *fpin = NULL;
2475 	vm_fault_t ret = 0;
2476 
2477 	spin_lock(&inode->i_lock);
2478 	shmem_falloc = inode->i_private;
2479 	if (shmem_falloc &&
2480 	    shmem_falloc->waitq &&
2481 	    vmf->pgoff >= shmem_falloc->start &&
2482 	    vmf->pgoff < shmem_falloc->next) {
2483 		wait_queue_head_t *shmem_falloc_waitq;
2484 		DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2485 
2486 		ret = VM_FAULT_NOPAGE;
2487 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2488 		shmem_falloc_waitq = shmem_falloc->waitq;
2489 		prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2490 				TASK_UNINTERRUPTIBLE);
2491 		spin_unlock(&inode->i_lock);
2492 		schedule();
2493 
2494 		/*
2495 		 * shmem_falloc_waitq points into the shmem_fallocate()
2496 		 * stack of the hole-punching task: shmem_falloc_waitq
2497 		 * is usually invalid by the time we reach here, but
2498 		 * finish_wait() does not dereference it in that case;
2499 		 * though i_lock is needed lest we race with wake_up_all().
2500 		 */
2501 		spin_lock(&inode->i_lock);
2502 		finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2503 	}
2504 	spin_unlock(&inode->i_lock);
2505 	if (fpin) {
2506 		fput(fpin);
2507 		ret = VM_FAULT_RETRY;
2508 	}
2509 	return ret;
2510 }
2511 
2512 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2513 {
2514 	struct inode *inode = file_inode(vmf->vma->vm_file);
2515 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2516 	struct folio *folio = NULL;
2517 	vm_fault_t ret = 0;
2518 	int err;
2519 
2520 	/*
2521 	 * Trinity finds that probing a hole which tmpfs is punching can
2522 	 * prevent the hole-punch from ever completing: noted in i_private.
2523 	 */
2524 	if (unlikely(inode->i_private)) {
2525 		ret = shmem_falloc_wait(vmf, inode);
2526 		if (ret)
2527 			return ret;
2528 	}
2529 
2530 	WARN_ON_ONCE(vmf->page != NULL);
2531 	err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2532 				  gfp, vmf, &ret);
2533 	if (err)
2534 		return vmf_error(err);
2535 	if (folio) {
2536 		vmf->page = folio_file_page(folio, vmf->pgoff);
2537 		ret |= VM_FAULT_LOCKED;
2538 	}
2539 	return ret;
2540 }
2541 
2542 unsigned long shmem_get_unmapped_area(struct file *file,
2543 				      unsigned long uaddr, unsigned long len,
2544 				      unsigned long pgoff, unsigned long flags)
2545 {
2546 	unsigned long addr;
2547 	unsigned long offset;
2548 	unsigned long inflated_len;
2549 	unsigned long inflated_addr;
2550 	unsigned long inflated_offset;
2551 	unsigned long hpage_size;
2552 
2553 	if (len > TASK_SIZE)
2554 		return -ENOMEM;
2555 
2556 	addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff,
2557 				    flags);
2558 
2559 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2560 		return addr;
2561 	if (IS_ERR_VALUE(addr))
2562 		return addr;
2563 	if (addr & ~PAGE_MASK)
2564 		return addr;
2565 	if (addr > TASK_SIZE - len)
2566 		return addr;
2567 
2568 	if (shmem_huge == SHMEM_HUGE_DENY)
2569 		return addr;
2570 	if (flags & MAP_FIXED)
2571 		return addr;
2572 	/*
2573 	 * Our priority is to support MAP_SHARED mapped hugely;
2574 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2575 	 * But if caller specified an address hint and we allocated area there
2576 	 * successfully, respect that as before.
2577 	 */
2578 	if (uaddr == addr)
2579 		return addr;
2580 
2581 	hpage_size = HPAGE_PMD_SIZE;
2582 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2583 		struct super_block *sb;
2584 		unsigned long __maybe_unused hpage_orders;
2585 		int order = 0;
2586 
2587 		if (file) {
2588 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2589 			sb = file_inode(file)->i_sb;
2590 		} else {
2591 			/*
2592 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2593 			 * for "/dev/zero", to create a shared anonymous object.
2594 			 */
2595 			if (IS_ERR(shm_mnt))
2596 				return addr;
2597 			sb = shm_mnt->mnt_sb;
2598 
2599 			/*
2600 			 * Find the highest mTHP order used for anonymous shmem to
2601 			 * provide a suitably aligned address.
2602 			 */
2603 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2604 			hpage_orders = READ_ONCE(huge_shmem_orders_always);
2605 			hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2606 			hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2607 			if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2608 				hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2609 
2610 			if (hpage_orders > 0) {
2611 				order = highest_order(hpage_orders);
2612 				hpage_size = PAGE_SIZE << order;
2613 			}
2614 #endif
2615 		}
2616 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2617 			return addr;
2618 	}
2619 
2620 	if (len < hpage_size)
2621 		return addr;
2622 
2623 	offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2624 	if (offset && offset + len < 2 * hpage_size)
2625 		return addr;
2626 	if ((addr & (hpage_size - 1)) == offset)
2627 		return addr;
2628 
2629 	inflated_len = len + hpage_size - PAGE_SIZE;
2630 	if (inflated_len > TASK_SIZE)
2631 		return addr;
2632 	if (inflated_len < len)
2633 		return addr;
2634 
2635 	inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr,
2636 					     inflated_len, 0, flags);
2637 	if (IS_ERR_VALUE(inflated_addr))
2638 		return addr;
2639 	if (inflated_addr & ~PAGE_MASK)
2640 		return addr;
2641 
2642 	inflated_offset = inflated_addr & (hpage_size - 1);
2643 	inflated_addr += offset - inflated_offset;
2644 	if (inflated_offset > offset)
2645 		inflated_addr += hpage_size;
2646 
2647 	if (inflated_addr > TASK_SIZE - len)
2648 		return addr;
2649 	return inflated_addr;
2650 }
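
/*
 * Worked example for the inflation above (assumed values): with
 * hpage_size = 2MB, offset = 0, and an unaligned inflated_addr of
 * 0x7f00001ff000, inflated_offset is 0x1ff000 > offset, so
 * 0x200000 - 0x1ff000 = 0x1000 is added, yielding the 2MB-aligned
 * 0x7f0000200000; the hpage_size - PAGE_SIZE of slack built into
 * inflated_len guarantees the aligned range still fits.
 */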
2651 
2652 #ifdef CONFIG_NUMA
2653 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2654 {
2655 	struct inode *inode = file_inode(vma->vm_file);
2656 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2657 }
2658 
2659 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2660 					  unsigned long addr, pgoff_t *ilx)
2661 {
2662 	struct inode *inode = file_inode(vma->vm_file);
2663 	pgoff_t index;
2664 
2665 	/*
2666 	 * Bias interleave by inode number to distribute better across nodes;
2667 	 * but this interface is independent of which page order is used, so
2668 	 * supplies only that bias, letting caller apply the offset (adjusted
2669 	 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2670 	 */
2671 	*ilx = inode->i_ino;
2672 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2673 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2674 }
2675 
2676 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2677 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2678 {
2679 	struct mempolicy *mpol;
2680 
2681 	/* Bias interleave by inode number to distribute better across nodes */
2682 	*ilx = info->vfs_inode.i_ino + (index >> order);
2683 
2684 	mpol = mpol_shared_policy_lookup(&info->policy, index);
2685 	return mpol ? mpol : get_task_policy(current);
2686 }
2687 #else
2688 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2689 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2690 {
2691 	*ilx = 0;
2692 	return NULL;
2693 }
2694 #endif /* CONFIG_NUMA */
2695 
2696 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2697 {
2698 	struct inode *inode = file_inode(file);
2699 	struct shmem_inode_info *info = SHMEM_I(inode);
2700 	int retval = -ENOMEM;
2701 
2702 	/*
2703 	 * What serializes the accesses to info->flags?
2704 	 * ipc_lock_object() when called from shmctl_do_lock(),
2705 	 * no serialization needed when called from shm_destroy().
2706 	 */
2707 	if (lock && !(info->flags & VM_LOCKED)) {
2708 		if (!user_shm_lock(inode->i_size, ucounts))
2709 			goto out_nomem;
2710 		info->flags |= VM_LOCKED;
2711 		mapping_set_unevictable(file->f_mapping);
2712 	}
2713 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2714 		user_shm_unlock(inode->i_size, ucounts);
2715 		info->flags &= ~VM_LOCKED;
2716 		mapping_clear_unevictable(file->f_mapping);
2717 	}
2718 	retval = 0;
2719 
2720 out_nomem:
2721 	return retval;
2722 }
2723 
2724 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2725 {
2726 	struct inode *inode = file_inode(file);
2727 	struct shmem_inode_info *info = SHMEM_I(inode);
2728 	int ret;
2729 
2730 	ret = seal_check_write(info->seals, vma);
2731 	if (ret)
2732 		return ret;
2733 
2734 	file_accessed(file);
2735 	/* This is anonymous shared memory if it is unlinked at the time of mmap */
2736 	if (inode->i_nlink)
2737 		vma->vm_ops = &shmem_vm_ops;
2738 	else
2739 		vma->vm_ops = &shmem_anon_vm_ops;
2740 	return 0;
2741 }
2742 
2743 static int shmem_file_open(struct inode *inode, struct file *file)
2744 {
2745 	file->f_mode |= FMODE_CAN_ODIRECT;
2746 	return generic_file_open(inode, file);
2747 }
2748 
2749 #ifdef CONFIG_TMPFS_XATTR
2750 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2751 
2752 /*
2753  * chattr's fsflags are unrelated to extended attributes,
2754  * but tmpfs has chosen to enable them under the same config option.
2755  */
2756 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2757 {
2758 	unsigned int i_flags = 0;
2759 
2760 	if (fsflags & FS_NOATIME_FL)
2761 		i_flags |= S_NOATIME;
2762 	if (fsflags & FS_APPEND_FL)
2763 		i_flags |= S_APPEND;
2764 	if (fsflags & FS_IMMUTABLE_FL)
2765 		i_flags |= S_IMMUTABLE;
2766 	/*
2767 	 * But FS_NODUMP_FL does not require any action in i_flags.
2768 	 */
2769 	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
2770 }
2771 #else
2772 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
2773 {
2774 }
2775 #define shmem_initxattrs NULL
2776 #endif
2777 
2778 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
2779 {
2780 	return &SHMEM_I(inode)->dir_offsets;
2781 }
2782 
2783 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
2784 					     struct super_block *sb,
2785 					     struct inode *dir, umode_t mode,
2786 					     dev_t dev, unsigned long flags)
2787 {
2788 	struct inode *inode;
2789 	struct shmem_inode_info *info;
2790 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2791 	ino_t ino;
2792 	int err;
2793 
2794 	err = shmem_reserve_inode(sb, &ino);
2795 	if (err)
2796 		return ERR_PTR(err);
2797 
2798 	inode = new_inode(sb);
2799 	if (!inode) {
2800 		shmem_free_inode(sb, 0);
2801 		return ERR_PTR(-ENOSPC);
2802 	}
2803 
2804 	inode->i_ino = ino;
2805 	inode_init_owner(idmap, inode, dir, mode);
2806 	inode->i_blocks = 0;
2807 	simple_inode_init_ts(inode);
2808 	inode->i_generation = get_random_u32();
2809 	info = SHMEM_I(inode);
2810 	memset(info, 0, (char *)inode - (char *)info);
2811 	spin_lock_init(&info->lock);
2812 	atomic_set(&info->stop_eviction, 0);
2813 	info->seals = F_SEAL_SEAL;
2814 	info->flags = flags & VM_NORESERVE;
2815 	info->i_crtime = inode_get_mtime(inode);
2816 	info->fsflags = (dir == NULL) ? 0 :
2817 		SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
2818 	if (info->fsflags)
2819 		shmem_set_inode_flags(inode, info->fsflags);
2820 	INIT_LIST_HEAD(&info->shrinklist);
2821 	INIT_LIST_HEAD(&info->swaplist);
2822 	simple_xattrs_init(&info->xattrs);
2823 	cache_no_acl(inode);
2824 	if (sbinfo->noswap)
2825 		mapping_set_unevictable(inode->i_mapping);
2826 	mapping_set_large_folios(inode->i_mapping);
2827 
2828 	switch (mode & S_IFMT) {
2829 	default:
2830 		inode->i_op = &shmem_special_inode_operations;
2831 		init_special_inode(inode, mode, dev);
2832 		break;
2833 	case S_IFREG:
2834 		inode->i_mapping->a_ops = &shmem_aops;
2835 		inode->i_op = &shmem_inode_operations;
2836 		inode->i_fop = &shmem_file_operations;
2837 		mpol_shared_policy_init(&info->policy,
2838 					 shmem_get_sbmpol(sbinfo));
2839 		break;
2840 	case S_IFDIR:
2841 		inc_nlink(inode);
2842 		/* Some things misbehave if size == 0 on a directory */
2843 		inode->i_size = 2 * BOGO_DIRENT_SIZE;
2844 		inode->i_op = &shmem_dir_inode_operations;
2845 		inode->i_fop = &simple_offset_dir_operations;
2846 		simple_offset_init(shmem_get_offset_ctx(inode));
2847 		break;
2848 	case S_IFLNK:
2849 		/*
2850 		 * Must not load anything into the rbtree:
2851 		 * mpol_free_shared_policy() will not be called.
2852 		 */
2853 		mpol_shared_policy_init(&info->policy, NULL);
2854 		break;
2855 	}
2856 
2857 	lockdep_annotate_inode_mutex_key(inode);
2858 	return inode;
2859 }
2860 
2861 #ifdef CONFIG_TMPFS_QUOTA
2862 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2863 				     struct super_block *sb, struct inode *dir,
2864 				     umode_t mode, dev_t dev, unsigned long flags)
2865 {
2866 	int err;
2867 	struct inode *inode;
2868 
2869 	inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2870 	if (IS_ERR(inode))
2871 		return inode;
2872 
2873 	err = dquot_initialize(inode);
2874 	if (err)
2875 		goto errout;
2876 
2877 	err = dquot_alloc_inode(inode);
2878 	if (err) {
2879 		dquot_drop(inode);
2880 		goto errout;
2881 	}
2882 	return inode;
2883 
2884 errout:
2885 	inode->i_flags |= S_NOQUOTA;
2886 	iput(inode);
2887 	return ERR_PTR(err);
2888 }
2889 #else
2890 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
2891 				     struct super_block *sb, struct inode *dir,
2892 				     umode_t mode, dev_t dev, unsigned long flags)
2893 {
2894 	return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
2895 }
2896 #endif /* CONFIG_TMPFS_QUOTA */
2897 
2898 #ifdef CONFIG_USERFAULTFD
2899 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
2900 			   struct vm_area_struct *dst_vma,
2901 			   unsigned long dst_addr,
2902 			   unsigned long src_addr,
2903 			   uffd_flags_t flags,
2904 			   struct folio **foliop)
2905 {
2906 	struct inode *inode = file_inode(dst_vma->vm_file);
2907 	struct shmem_inode_info *info = SHMEM_I(inode);
2908 	struct address_space *mapping = inode->i_mapping;
2909 	gfp_t gfp = mapping_gfp_mask(mapping);
2910 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2911 	void *page_kaddr;
2912 	struct folio *folio;
2913 	int ret;
2914 	pgoff_t max_off;
2915 
2916 	if (shmem_inode_acct_blocks(inode, 1)) {
2917 		/*
2918 		 * We may have got a page, returned -ENOENT triggering a retry,
2919 		 * and now we find ourselves with -ENOMEM. Release the page, to
2920 		 * avoid a BUG_ON in our caller.
2921 		 */
2922 		if (unlikely(*foliop)) {
2923 			folio_put(*foliop);
2924 			*foliop = NULL;
2925 		}
2926 		return -ENOMEM;
2927 	}
2928 
2929 	if (!*foliop) {
2930 		ret = -ENOMEM;
2931 		folio = shmem_alloc_folio(gfp, 0, info, pgoff);
2932 		if (!folio)
2933 			goto out_unacct_blocks;
2934 
2935 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
2936 			page_kaddr = kmap_local_folio(folio, 0);
2937 			/*
2938 			 * The read mmap_lock is held here.  Despite the
2939 			 * mmap_lock being read recursive a deadlock is still
2940 			 * possible if a writer has taken a lock.  For example:
2941 			 *
2942 			 * process A thread 1 takes read lock on own mmap_lock
2943 			 * process A thread 2 calls mmap, blocks taking write lock
2944 			 * process B thread 1 takes page fault, read lock on own mmap lock
2945 			 * process B thread 2 calls mmap, blocks taking write lock
2946 			 * process A thread 1 blocks taking read lock on process B
2947 			 * process B thread 1 blocks taking read lock on process A
2948 			 *
2949 			 * Disable page faults to prevent potential deadlock
2950 			 * and retry the copy outside the mmap_lock.
2951 			 */
2952 			pagefault_disable();
2953 			ret = copy_from_user(page_kaddr,
2954 					     (const void __user *)src_addr,
2955 					     PAGE_SIZE);
2956 			pagefault_enable();
2957 			kunmap_local(page_kaddr);
2958 
2959 			/* fallback to copy_from_user outside mmap_lock */
2960 			if (unlikely(ret)) {
2961 				*foliop = folio;
2962 				ret = -ENOENT;
2963 				/* don't free the page */
2964 				goto out_unacct_blocks;
2965 			}
2966 
2967 			flush_dcache_folio(folio);
2968 		} else {		/* ZEROPAGE */
2969 			clear_user_highpage(&folio->page, dst_addr);
2970 		}
2971 	} else {
2972 		folio = *foliop;
2973 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2974 		*foliop = NULL;
2975 	}
2976 
2977 	VM_BUG_ON(folio_test_locked(folio));
2978 	VM_BUG_ON(folio_test_swapbacked(folio));
2979 	__folio_set_locked(folio);
2980 	__folio_set_swapbacked(folio);
2981 	__folio_mark_uptodate(folio);
2982 
2983 	ret = -EFAULT;
2984 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2985 	if (unlikely(pgoff >= max_off))
2986 		goto out_release;
2987 
2988 	ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
2989 	if (ret)
2990 		goto out_release;
2991 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
2992 	if (ret)
2993 		goto out_release;
2994 
2995 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
2996 				       &folio->page, true, flags);
2997 	if (ret)
2998 		goto out_delete_from_cache;
2999 
3000 	shmem_recalc_inode(inode, 1, 0);
3001 	folio_unlock(folio);
3002 	return 0;
3003 out_delete_from_cache:
3004 	filemap_remove_folio(folio);
3005 out_release:
3006 	folio_unlock(folio);
3007 	folio_put(folio);
3008 out_unacct_blocks:
3009 	shmem_inode_unacct_blocks(inode, 1);
3010 	return ret;
3011 }
3012 #endif /* CONFIG_USERFAULTFD */
3013 
3014 #ifdef CONFIG_TMPFS
3015 static const struct inode_operations shmem_symlink_inode_operations;
3016 static const struct inode_operations shmem_short_symlink_operations;
3017 
3018 static int
3019 shmem_write_begin(struct file *file, struct address_space *mapping,
3020 			loff_t pos, unsigned len,
3021 			struct folio **foliop, void **fsdata)
3022 {
3023 	struct inode *inode = mapping->host;
3024 	struct shmem_inode_info *info = SHMEM_I(inode);
3025 	pgoff_t index = pos >> PAGE_SHIFT;
3026 	struct folio *folio;
3027 	int ret = 0;
3028 
3029 	/* i_rwsem is held by caller */
3030 	if (unlikely(info->seals & (F_SEAL_GROW |
3031 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3032 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3033 			return -EPERM;
3034 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3035 			return -EPERM;
3036 	}
3037 
3038 	ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3039 	if (ret)
3040 		return ret;
3041 
3042 	if (folio_test_hwpoison(folio) ||
3043 	    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
3044 		folio_unlock(folio);
3045 		folio_put(folio);
3046 		return -EIO;
3047 	}
3048 
3049 	*foliop = folio;
3050 	return 0;
3051 }
3052 
3053 static int
3054 shmem_write_end(struct file *file, struct address_space *mapping,
3055 			loff_t pos, unsigned len, unsigned copied,
3056 			struct folio *folio, void *fsdata)
3057 {
3058 	struct inode *inode = mapping->host;
3059 
3060 	if (pos + copied > inode->i_size)
3061 		i_size_write(inode, pos + copied);
3062 
3063 	if (!folio_test_uptodate(folio)) {
3064 		if (copied < folio_size(folio)) {
3065 			size_t from = offset_in_folio(folio, pos);
3066 			folio_zero_segments(folio, 0, from,
3067 					from + copied, folio_size(folio));
3068 		}
3069 		folio_mark_uptodate(folio);
3070 	}
3071 	folio_mark_dirty(folio);
3072 	folio_unlock(folio);
3073 	folio_put(folio);
3074 
3075 	return copied;
3076 }
3077 
3078 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3079 {
3080 	struct file *file = iocb->ki_filp;
3081 	struct inode *inode = file_inode(file);
3082 	struct address_space *mapping = inode->i_mapping;
3083 	pgoff_t index;
3084 	unsigned long offset;
3085 	int error = 0;
3086 	ssize_t retval = 0;
3087 	loff_t *ppos = &iocb->ki_pos;
3088 
3089 	index = *ppos >> PAGE_SHIFT;
3090 	offset = *ppos & ~PAGE_MASK;
3091 
3092 	for (;;) {
3093 		struct folio *folio = NULL;
3094 		struct page *page = NULL;
3095 		pgoff_t end_index;
3096 		unsigned long nr, ret;
3097 		loff_t i_size = i_size_read(inode);
3098 
3099 		end_index = i_size >> PAGE_SHIFT;
3100 		if (index > end_index)
3101 			break;
3102 		if (index == end_index) {
3103 			nr = i_size & ~PAGE_MASK;
3104 			if (nr <= offset)
3105 				break;
3106 		}
3107 
3108 		error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3109 		if (error) {
3110 			if (error == -EINVAL)
3111 				error = 0;
3112 			break;
3113 		}
3114 		if (folio) {
3115 			folio_unlock(folio);
3116 
3117 			page = folio_file_page(folio, index);
3118 			if (PageHWPoison(page)) {
3119 				folio_put(folio);
3120 				error = -EIO;
3121 				break;
3122 			}
3123 		}
3124 
3125 		/*
3126 		 * We must re-evaluate i_size afterwards, since reads (unlike
3127 		 * writes) are called without i_rwsem protection against truncate
3128 		 */
3129 		nr = PAGE_SIZE;
3130 		i_size = i_size_read(inode);
3131 		end_index = i_size >> PAGE_SHIFT;
3132 		if (index == end_index) {
3133 			nr = i_size & ~PAGE_MASK;
3134 			if (nr <= offset) {
3135 				if (folio)
3136 					folio_put(folio);
3137 				break;
3138 			}
3139 		}
3140 		nr -= offset;
3141 
3142 		if (folio) {
3143 			/*
3144 			 * If users can be writing to this page using arbitrary
3145 			 * virtual addresses, take care about potential aliasing
3146 			 * before reading the page on the kernel side.
3147 			 */
3148 			if (mapping_writably_mapped(mapping))
3149 				flush_dcache_page(page);
3150 			/*
3151 			 * Mark the page accessed if we read the beginning.
3152 			 */
3153 			if (!offset)
3154 				folio_mark_accessed(folio);
3155 			/*
3156 			 * Ok, we have the page, and it's up-to-date, so
3157 			 * now we can copy it to user space...
3158 			 */
3159 			ret = copy_page_to_iter(page, offset, nr, to);
3160 			folio_put(folio);
3161 
3162 		} else if (user_backed_iter(to)) {
3163 			/*
3164 			 * Copy-to-user tends to be so well optimized, and
3165 			 * clear_user() not so much, that it is noticeably
3166 			 * faster to copy the zero page than to clear.
3167 			 */
3168 			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3169 		} else {
3170 			/*
3171 			 * But submitting the same page twice in a row to
3172 			 * splice() - or others? - can result in confusion:
3173 			 * so don't attempt that optimization on pipes etc.
3174 			 */
3175 			ret = iov_iter_zero(nr, to);
3176 		}
3177 
3178 		retval += ret;
3179 		offset += ret;
3180 		index += offset >> PAGE_SHIFT;
3181 		offset &= ~PAGE_MASK;
3182 
3183 		if (!iov_iter_count(to))
3184 			break;
3185 		if (ret < nr) {
3186 			error = -EFAULT;
3187 			break;
3188 		}
3189 		cond_resched();
3190 	}
3191 
3192 	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
3193 	file_accessed(file);
3194 	return retval ? retval : error;
3195 }
3196 
3197 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3198 {
3199 	struct file *file = iocb->ki_filp;
3200 	struct inode *inode = file->f_mapping->host;
3201 	ssize_t ret;
3202 
3203 	inode_lock(inode);
3204 	ret = generic_write_checks(iocb, from);
3205 	if (ret <= 0)
3206 		goto unlock;
3207 	ret = file_remove_privs(file);
3208 	if (ret)
3209 		goto unlock;
3210 	ret = file_update_time(file);
3211 	if (ret)
3212 		goto unlock;
3213 	ret = generic_perform_write(iocb, from);
3214 unlock:
3215 	inode_unlock(inode);
3216 	return ret;
3217 }
3218 
3219 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3220 			      struct pipe_buffer *buf)
3221 {
3222 	return true;
3223 }
3224 
3225 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3226 				  struct pipe_buffer *buf)
3227 {
3228 }
3229 
3230 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3231 				    struct pipe_buffer *buf)
3232 {
3233 	return false;
3234 }
3235 
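/*
 * Buffer ops for splicing ZERO_PAGE(0) into a pipe: the zero page is a
 * permanent global, so ->get and ->release need not take or drop page
 * references, and ->try_steal refuses so that a pipe reader can never
 * claim (and later modify or free) the shared zero page.
 */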
3236 static const struct pipe_buf_operations zero_pipe_buf_ops = {
3237 	.release	= zero_pipe_buf_release,
3238 	.try_steal	= zero_pipe_buf_try_steal,
3239 	.get		= zero_pipe_buf_get,
3240 };
3241 
3242 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3243 					loff_t fpos, size_t size)
3244 {
3245 	size_t offset = fpos & ~PAGE_MASK;
3246 
3247 	size = min_t(size_t, size, PAGE_SIZE - offset);
3248 
3249 	if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
3250 		struct pipe_buffer *buf = pipe_head_buf(pipe);
3251 
3252 		*buf = (struct pipe_buffer) {
3253 			.ops	= &zero_pipe_buf_ops,
3254 			.page	= ZERO_PAGE(0),
3255 			.offset	= offset,
3256 			.len	= size,
3257 		};
3258 		pipe->head++;
3259 	}
3260 
3261 	return size;
3262 }
3263 
3264 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3265 				      struct pipe_inode_info *pipe,
3266 				      size_t len, unsigned int flags)
3267 {
3268 	struct inode *inode = file_inode(in);
3269 	struct address_space *mapping = inode->i_mapping;
3270 	struct folio *folio = NULL;
3271 	size_t total_spliced = 0, used, npages, n, part;
3272 	loff_t isize;
3273 	int error = 0;
3274 
3275 	/* Work out how much data we can actually add into the pipe */
3276 	used = pipe_occupancy(pipe->head, pipe->tail);
3277 	npages = max_t(ssize_t, pipe->max_usage - used, 0);
3278 	len = min_t(size_t, len, npages * PAGE_SIZE);
3279 
3280 	do {
3281 		if (*ppos >= i_size_read(inode))
3282 			break;
3283 
3284 		error = shmem_get_folio(inode, *ppos / PAGE_SIZE, 0, &folio,
3285 					SGP_READ);
3286 		if (error) {
3287 			if (error == -EINVAL)
3288 				error = 0;
3289 			break;
3290 		}
3291 		if (folio) {
3292 			folio_unlock(folio);
3293 
3294 			if (folio_test_hwpoison(folio) ||
3295 			    (folio_test_large(folio) &&
3296 			     folio_test_has_hwpoisoned(folio))) {
3297 				error = -EIO;
3298 				break;
3299 			}
3300 		}
3301 
3302 		/*
3303 		 * i_size must be checked after we know the pages are Uptodate.
3304 		 *
3305 		 * Checking i_size only afterwards allows us to calculate
3306 		 * the correct value for "part", which means the zero-filled
3307 		 * part of the page is not copied back to userspace (unless
3308 		 * another truncate extends the file - this is desired though).
3309 		 */
3310 		isize = i_size_read(inode);
3311 		if (unlikely(*ppos >= isize))
3312 			break;
3313 		part = min_t(loff_t, isize - *ppos, len);
3314 
3315 		if (folio) {
3316 			/*
3317 			 * If users can be writing to this page using arbitrary
3318 			 * virtual addresses, take care about potential aliasing
3319 			 * before reading the page on the kernel side.
3320 			 */
3321 			if (mapping_writably_mapped(mapping))
3322 				flush_dcache_folio(folio);
3323 			folio_mark_accessed(folio);
3324 			/*
3325 			 * Ok, we have the page, and it's up-to-date, so we can
3326 			 * now splice it into the pipe.
3327 			 */
3328 			n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3329 			folio_put(folio);
3330 			folio = NULL;
3331 		} else {
3332 			n = splice_zeropage_into_pipe(pipe, *ppos, part);
3333 		}
3334 
3335 		if (!n)
3336 			break;
3337 		len -= n;
3338 		total_spliced += n;
3339 		*ppos += n;
3340 		in->f_ra.prev_pos = *ppos;
3341 		if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
3342 			break;
3343 
3344 		cond_resched();
3345 	} while (len);
3346 
3347 	if (folio)
3348 		folio_put(folio);
3349 
3350 	file_accessed(in);
3351 	return total_spliced ? total_spliced : error;
3352 }
3353 
3354 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3355 {
3356 	struct address_space *mapping = file->f_mapping;
3357 	struct inode *inode = mapping->host;
3358 
3359 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
3360 		return generic_file_llseek_size(file, offset, whence,
3361 					MAX_LFS_FILESIZE, i_size_read(inode));
3362 	if (offset < 0)
3363 		return -ENXIO;
3364 
3365 	inode_lock(inode);
3366 	/* We're holding i_rwsem so we can access i_size directly */
3367 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3368 	if (offset >= 0)
3369 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3370 	inode_unlock(inode);
3371 	return offset;
3372 }
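
/*
 * Illustrative use of the SEEK_DATA/SEEK_HOLE support above (assuming
 * "fd" is an open tmpfs file): lseek(fd, 0, SEEK_DATA) returns the
 * offset of the next data extent, while offsets at or beyond end of
 * file make lseek(2) fail with ENXIO.
 */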
3373 
3374 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3375 							 loff_t len)
3376 {
3377 	struct inode *inode = file_inode(file);
3378 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3379 	struct shmem_inode_info *info = SHMEM_I(inode);
3380 	struct shmem_falloc shmem_falloc;
3381 	pgoff_t start, index, end, undo_fallocend;
3382 	int error;
3383 
3384 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3385 		return -EOPNOTSUPP;
3386 
3387 	inode_lock(inode);
3388 
3389 	if (mode & FALLOC_FL_PUNCH_HOLE) {
3390 		struct address_space *mapping = file->f_mapping;
3391 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
3392 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3393 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3394 
3395 		/* protected by i_rwsem */
3396 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3397 			error = -EPERM;
3398 			goto out;
3399 		}
3400 
3401 		shmem_falloc.waitq = &shmem_falloc_waitq;
3402 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3403 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3404 		spin_lock(&inode->i_lock);
3405 		inode->i_private = &shmem_falloc;
3406 		spin_unlock(&inode->i_lock);
3407 
3408 		if ((u64)unmap_end > (u64)unmap_start)
3409 			unmap_mapping_range(mapping, unmap_start,
3410 					    1 + unmap_end - unmap_start, 0);
3411 		shmem_truncate_range(inode, offset, offset + len - 1);
3412 		/* No need to unmap again: hole-punching leaves COWed pages */
3413 
3414 		spin_lock(&inode->i_lock);
3415 		inode->i_private = NULL;
3416 		wake_up_all(&shmem_falloc_waitq);
3417 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3418 		spin_unlock(&inode->i_lock);
3419 		error = 0;
3420 		goto out;
3421 	}
3422 
3423 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3424 	error = inode_newsize_ok(inode, offset + len);
3425 	if (error)
3426 		goto out;
3427 
3428 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3429 		error = -EPERM;
3430 		goto out;
3431 	}
3432 
3433 	start = offset >> PAGE_SHIFT;
3434 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3435 	/* Try to avoid a swapstorm if len is impossible to satisfy */
3436 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3437 		error = -ENOSPC;
3438 		goto out;
3439 	}
3440 
3441 	shmem_falloc.waitq = NULL;
3442 	shmem_falloc.start = start;
3443 	shmem_falloc.next  = start;
3444 	shmem_falloc.nr_falloced = 0;
3445 	shmem_falloc.nr_unswapped = 0;
3446 	spin_lock(&inode->i_lock);
3447 	inode->i_private = &shmem_falloc;
3448 	spin_unlock(&inode->i_lock);
3449 
3450 	/*
3451 	 * info->fallocend is only relevant when huge pages might be
3452 	 * involved: to prevent split_huge_page() freeing fallocated
3453 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3454 	 */
3455 	undo_fallocend = info->fallocend;
3456 	if (info->fallocend < end)
3457 		info->fallocend = end;
3458 
3459 	for (index = start; index < end; ) {
3460 		struct folio *folio;
3461 
3462 		/*
3463 		 * Check for fatal signal so that we abort early in OOM
3464 		 * situations. We don't want to abort in case of non-fatal
3465 		 * signals as large fallocate can take noticeable time and
3466 		 * e.g. periodic timers may result in fallocate constantly
3467 		 * restarting.
3468 		 */
3469 		if (fatal_signal_pending(current))
3470 			error = -EINTR;
3471 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3472 			error = -ENOMEM;
3473 		else
3474 			error = shmem_get_folio(inode, index, offset + len,
3475 						&folio, SGP_FALLOC);
3476 		if (error) {
3477 			info->fallocend = undo_fallocend;
3478 			/* Remove the !uptodate folios we added */
3479 			if (index > start) {
3480 				shmem_undo_range(inode,
3481 				    (loff_t)start << PAGE_SHIFT,
3482 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
3483 			}
3484 			goto undone;
3485 		}
3486 
3487 		/*
3488 		 * Here is a more important optimization than it appears:
3489 		 * a second SGP_FALLOC on the same large folio will clear it,
3490 		 * making it uptodate and un-undoable if we fail later.
3491 		 */
3492 		index = folio_next_index(folio);
3493 		/* Beware 32-bit wraparound */
3494 		if (!index)
3495 			index--;
3496 
3497 		/*
3498 		 * Inform shmem_writepage() how far we have reached.
3499 		 * No need for lock or barrier: we have the page lock.
3500 		 */
3501 		if (!folio_test_uptodate(folio))
3502 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
3503 		shmem_falloc.next = index;
3504 
3505 		/*
3506 		 * If !uptodate, leave it that way so that freeable folios
3507 		 * can be recognized if we need to rollback on error later.
3508 		 * But mark it dirty so that memory pressure will swap rather
3509 		 * than free the folios we are allocating (and SGP_CACHE folios
3510 		 * might still be clean: we now need to mark those dirty too).
3511 		 */
3512 		folio_mark_dirty(folio);
3513 		folio_unlock(folio);
3514 		folio_put(folio);
3515 		cond_resched();
3516 	}
3517 
3518 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3519 		i_size_write(inode, offset + len);
3520 undone:
3521 	spin_lock(&inode->i_lock);
3522 	inode->i_private = NULL;
3523 	spin_unlock(&inode->i_lock);
3524 out:
3525 	if (!error)
3526 		file_modified(file);
3527 	inode_unlock(inode);
3528 	return error;
3529 }
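
/*
 * Usage sketch for the two fallocate(2) modes supported above, assuming
 * "fd" is an open tmpfs file: plain preallocation is
 *
 *	fallocate(fd, 0, 0, 1 << 20);
 *
 * and hole-punching (PUNCH_HOLE must be paired with KEEP_SIZE, which
 * the VFS enforces before this function is reached) is
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 */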
3530 
3531 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3532 {
3533 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3534 
3535 	buf->f_type = TMPFS_MAGIC;
3536 	buf->f_bsize = PAGE_SIZE;
3537 	buf->f_namelen = NAME_MAX;
3538 	if (sbinfo->max_blocks) {
3539 		buf->f_blocks = sbinfo->max_blocks;
3540 		buf->f_bavail =
3541 		buf->f_bfree  = sbinfo->max_blocks -
3542 				percpu_counter_sum(&sbinfo->used_blocks);
3543 	}
3544 	if (sbinfo->max_inodes) {
3545 		buf->f_files = sbinfo->max_inodes;
3546 		buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3547 	}
3548 	/* else leave those fields 0 like simple_statfs */
3549 
3550 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3551 
3552 	return 0;
3553 }
3554 
3555 /*
3556  * File creation. Allocate an inode, and we're done.
3557  */
3558 static int
3559 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3560 	    struct dentry *dentry, umode_t mode, dev_t dev)
3561 {
3562 	struct inode *inode;
3563 	int error;
3564 
3565 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3566 	if (IS_ERR(inode))
3567 		return PTR_ERR(inode);
3568 
3569 	error = simple_acl_create(dir, inode);
3570 	if (error)
3571 		goto out_iput;
3572 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3573 					     shmem_initxattrs, NULL);
3574 	if (error && error != -EOPNOTSUPP)
3575 		goto out_iput;
3576 
3577 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3578 	if (error)
3579 		goto out_iput;
3580 
3581 	dir->i_size += BOGO_DIRENT_SIZE;
3582 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3583 	inode_inc_iversion(dir);
3584 	d_instantiate(dentry, inode);
3585 	dget(dentry); /* Extra count - pin the dentry in core */
3586 	return error;
3587 
3588 out_iput:
3589 	iput(inode);
3590 	return error;
3591 }
3592 
3593 static int
3594 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3595 	      struct file *file, umode_t mode)
3596 {
3597 	struct inode *inode;
3598 	int error;
3599 
3600 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3601 	if (IS_ERR(inode)) {
3602 		error = PTR_ERR(inode);
3603 		goto err_out;
3604 	}
3605 	error = security_inode_init_security(inode, dir, NULL,
3606 					     shmem_initxattrs, NULL);
3607 	if (error && error != -EOPNOTSUPP)
3608 		goto out_iput;
3609 	error = simple_acl_create(dir, inode);
3610 	if (error)
3611 		goto out_iput;
3612 	d_tmpfile(file, inode);
3613 
3614 err_out:
3615 	return finish_open_simple(file, error);
3616 out_iput:
3617 	iput(inode);
3618 	return error;
3619 }
3620 
3621 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3622 		       struct dentry *dentry, umode_t mode)
3623 {
3624 	int error;
3625 
3626 	error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3627 	if (error)
3628 		return error;
3629 	inc_nlink(dir);
3630 	return 0;
3631 }
3632 
3633 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3634 			struct dentry *dentry, umode_t mode, bool excl)
3635 {
3636 	return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3637 }
3638 
3639 /*
3640  * Link a file.
3641  */
3642 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3643 		      struct dentry *dentry)
3644 {
3645 	struct inode *inode = d_inode(old_dentry);
3646 	int ret = 0;
3647 
3648 	/*
3649 	 * No ordinary (disk based) filesystem counts links as inodes;
3650 	 * but each new link needs a new dentry, pinning lowmem, and
3651 	 * tmpfs dentries cannot be pruned until they are unlinked.
3652 	 * But if an O_TMPFILE file is linked into the tmpfs, the
3653 	 * first link must skip that, to get the accounting right.
3654 	 */
3655 	if (inode->i_nlink) {
3656 		ret = shmem_reserve_inode(inode->i_sb, NULL);
3657 		if (ret)
3658 			goto out;
3659 	}
3660 
3661 	ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3662 	if (ret) {
3663 		if (inode->i_nlink)
3664 			shmem_free_inode(inode->i_sb, 0);
3665 		goto out;
3666 	}
3667 
3668 	dir->i_size += BOGO_DIRENT_SIZE;
3669 	inode_set_mtime_to_ts(dir,
3670 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3671 	inode_inc_iversion(dir);
3672 	inc_nlink(inode);
3673 	ihold(inode);	/* New dentry reference */
3674 	dget(dentry);	/* Extra pinning count for the created dentry */
3675 	d_instantiate(dentry, inode);
3676 out:
3677 	return ret;
3678 }
3679 
3680 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3681 {
3682 	struct inode *inode = d_inode(dentry);
3683 
3684 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3685 		shmem_free_inode(inode->i_sb, 0);
3686 
3687 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3688 
3689 	dir->i_size -= BOGO_DIRENT_SIZE;
3690 	inode_set_mtime_to_ts(dir,
3691 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3692 	inode_inc_iversion(dir);
3693 	drop_nlink(inode);
3694 	dput(dentry);	/* Undo the count from "create" - does all the work */
3695 	return 0;
3696 }
3697 
3698 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3699 {
3700 	if (!simple_offset_empty(dentry))
3701 		return -ENOTEMPTY;
3702 
3703 	drop_nlink(d_inode(dentry));
3704 	drop_nlink(dir);
3705 	return shmem_unlink(dir, dentry);
3706 }
3707 
3708 static int shmem_whiteout(struct mnt_idmap *idmap,
3709 			  struct inode *old_dir, struct dentry *old_dentry)
3710 {
3711 	struct dentry *whiteout;
3712 	int error;
3713 
3714 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3715 	if (!whiteout)
3716 		return -ENOMEM;
3717 
3718 	error = shmem_mknod(idmap, old_dir, whiteout,
3719 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3720 	dput(whiteout);
3721 	if (error)
3722 		return error;
3723 
3724 	/*
3725 	 * Cheat and hash the whiteout while the old dentry is still in
3726 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3727 	 *
3728 	 * d_lookup() will consistently find one of them at this point,
3729 	 * not sure which one, but that isn't even important.
3730 	 */
3731 	d_rehash(whiteout);
3732 	return 0;
3733 }
3734 
3735 /*
3736  * The VFS layer already does all the dentry stuff for rename,
3737  * we just have to decrement the usage count for the target if
3738  * it exists so that the VFS layer correctly frees it when it
3739  * gets overwritten.
3740  */
3741 static int shmem_rename2(struct mnt_idmap *idmap,
3742 			 struct inode *old_dir, struct dentry *old_dentry,
3743 			 struct inode *new_dir, struct dentry *new_dentry,
3744 			 unsigned int flags)
3745 {
3746 	struct inode *inode = d_inode(old_dentry);
3747 	int they_are_dirs = S_ISDIR(inode->i_mode);
3748 	int error;
3749 
3750 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3751 		return -EINVAL;
3752 
3753 	if (flags & RENAME_EXCHANGE)
3754 		return simple_offset_rename_exchange(old_dir, old_dentry,
3755 						     new_dir, new_dentry);
3756 
3757 	if (!simple_offset_empty(new_dentry))
3758 		return -ENOTEMPTY;
3759 
3760 	if (flags & RENAME_WHITEOUT) {
3761 		error = shmem_whiteout(idmap, old_dir, old_dentry);
3762 		if (error)
3763 			return error;
3764 	}
3765 
3766 	error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
3767 	if (error)
3768 		return error;
3769 
3770 	if (d_really_is_positive(new_dentry)) {
3771 		(void) shmem_unlink(new_dir, new_dentry);
3772 		if (they_are_dirs) {
3773 			drop_nlink(d_inode(new_dentry));
3774 			drop_nlink(old_dir);
3775 		}
3776 	} else if (they_are_dirs) {
3777 		drop_nlink(old_dir);
3778 		inc_nlink(new_dir);
3779 	}
3780 
3781 	old_dir->i_size -= BOGO_DIRENT_SIZE;
3782 	new_dir->i_size += BOGO_DIRENT_SIZE;
3783 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
3784 	inode_inc_iversion(old_dir);
3785 	inode_inc_iversion(new_dir);
3786 	return 0;
3787 }
3788 
3789 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
3790 			 struct dentry *dentry, const char *symname)
3791 {
3792 	int error;
3793 	int len;
3794 	struct inode *inode;
3795 	struct folio *folio;
3796 
3797 	len = strlen(symname) + 1;
3798 	if (len > PAGE_SIZE)
3799 		return -ENAMETOOLONG;
3800 
3801 	inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
3802 				VM_NORESERVE);
3803 	if (IS_ERR(inode))
3804 		return PTR_ERR(inode);
3805 
3806 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3807 					     shmem_initxattrs, NULL);
3808 	if (error && error != -EOPNOTSUPP)
3809 		goto out_iput;
3810 
3811 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3812 	if (error)
3813 		goto out_iput;
3814 
3815 	inode->i_size = len-1;
3816 	if (len <= SHORT_SYMLINK_LEN) {
3817 		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3818 		if (!inode->i_link) {
3819 			error = -ENOMEM;
3820 			goto out_remove_offset;
3821 		}
3822 		inode->i_op = &shmem_short_symlink_operations;
3823 	} else {
3824 		inode_nohighmem(inode);
3825 		inode->i_mapping->a_ops = &shmem_aops;
3826 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
3827 		if (error)
3828 			goto out_remove_offset;
3829 		inode->i_op = &shmem_symlink_inode_operations;
3830 		memcpy(folio_address(folio), symname, len);
3831 		folio_mark_uptodate(folio);
3832 		folio_mark_dirty(folio);
3833 		folio_unlock(folio);
3834 		folio_put(folio);
3835 	}
3836 	dir->i_size += BOGO_DIRENT_SIZE;
3837 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3838 	inode_inc_iversion(dir);
3839 	d_instantiate(dentry, inode);
3840 	dget(dentry);
3841 	return 0;
3842 
3843 out_remove_offset:
3844 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3845 out_iput:
3846 	iput(inode);
3847 	return error;
3848 }
3849 
3850 static void shmem_put_link(void *arg)
3851 {
3852 	folio_mark_accessed(arg);
3853 	folio_put(arg);
3854 }
3855 
3856 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
3857 				  struct delayed_call *done)
3858 {
3859 	struct folio *folio = NULL;
3860 	int error;
3861 
3862 	if (!dentry) {
3863 		folio = filemap_get_folio(inode->i_mapping, 0);
3864 		if (IS_ERR(folio))
3865 			return ERR_PTR(-ECHILD);
3866 		if (PageHWPoison(folio_page(folio, 0)) ||
3867 		    !folio_test_uptodate(folio)) {
3868 			folio_put(folio);
3869 			return ERR_PTR(-ECHILD);
3870 		}
3871 	} else {
3872 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
3873 		if (error)
3874 			return ERR_PTR(error);
3875 		if (!folio)
3876 			return ERR_PTR(-ECHILD);
3877 		if (PageHWPoison(folio_page(folio, 0))) {
3878 			folio_unlock(folio);
3879 			folio_put(folio);
3880 			return ERR_PTR(-ECHILD);
3881 		}
3882 		folio_unlock(folio);
3883 	}
3884 	set_delayed_call(done, shmem_put_link, folio);
3885 	return folio_address(folio);
3886 }
3887 
3888 #ifdef CONFIG_TMPFS_XATTR
3889 
3890 static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3891 {
3892 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3893 
3894 	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
3895 
3896 	return 0;
3897 }
3898 
3899 static int shmem_fileattr_set(struct mnt_idmap *idmap,
3900 			      struct dentry *dentry, struct fileattr *fa)
3901 {
3902 	struct inode *inode = d_inode(dentry);
3903 	struct shmem_inode_info *info = SHMEM_I(inode);
3904 
3905 	if (fileattr_has_fsx(fa))
3906 		return -EOPNOTSUPP;
3907 	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
3908 		return -EOPNOTSUPP;
3909 
3910 	info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
3911 		(fa->flags & SHMEM_FL_USER_MODIFIABLE);
3912 
3913 	shmem_set_inode_flags(inode, info->fsflags);
3914 	inode_set_ctime_current(inode);
3915 	inode_inc_iversion(inode);
3916 	return 0;
3917 }
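
/*
 * The fileattr pair above backs the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS
 * ioctls, so a sketch of marking a tmpfs file nodump (assuming "fd" is
 * open on it, and that FS_NODUMP_FL is among the modifiable flags):
 *
 *	int flags;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NODUMP_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */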
3918 
3919 /*
3920  * Superblocks without xattr inode operations may get some security.* xattr
3921  * support from the LSM "for free". As soon as we have any other xattrs
3922  * like ACLs, we also need to implement the security.* handlers at
3923  * filesystem level, though.
3924  */
3925 
3926 /*
3927  * Callback for security_inode_init_security() for acquiring xattrs.
3928  */
3929 static int shmem_initxattrs(struct inode *inode,
3930 			    const struct xattr *xattr_array, void *fs_info)
3931 {
3932 	struct shmem_inode_info *info = SHMEM_I(inode);
3933 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3934 	const struct xattr *xattr;
3935 	struct simple_xattr *new_xattr;
3936 	size_t ispace = 0;
3937 	size_t len;
3938 
3939 	if (sbinfo->max_inodes) {
3940 		for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3941 			ispace += simple_xattr_space(xattr->name,
3942 				xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
3943 		}
3944 		if (ispace) {
3945 			raw_spin_lock(&sbinfo->stat_lock);
3946 			if (sbinfo->free_ispace < ispace)
3947 				ispace = 0;
3948 			else
3949 				sbinfo->free_ispace -= ispace;
3950 			raw_spin_unlock(&sbinfo->stat_lock);
3951 			if (!ispace)
3952 				return -ENOSPC;
3953 		}
3954 	}
3955 
3956 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3957 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3958 		if (!new_xattr)
3959 			break;
3960 
3961 		len = strlen(xattr->name) + 1;
3962 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3963 					  GFP_KERNEL_ACCOUNT);
3964 		if (!new_xattr->name) {
3965 			kvfree(new_xattr);
3966 			break;
3967 		}
3968 
3969 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3970 		       XATTR_SECURITY_PREFIX_LEN);
3971 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3972 		       xattr->name, len);
3973 
3974 		simple_xattr_add(&info->xattrs, new_xattr);
3975 	}
3976 
3977 	if (xattr->name != NULL) {
3978 		if (ispace) {
3979 			raw_spin_lock(&sbinfo->stat_lock);
3980 			sbinfo->free_ispace += ispace;
3981 			raw_spin_unlock(&sbinfo->stat_lock);
3982 		}
3983 		simple_xattrs_free(&info->xattrs, NULL);
3984 		return -ENOMEM;
3985 	}
3986 
3987 	return 0;
3988 }
3989 
3990 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3991 				   struct dentry *unused, struct inode *inode,
3992 				   const char *name, void *buffer, size_t size)
3993 {
3994 	struct shmem_inode_info *info = SHMEM_I(inode);
3995 
3996 	name = xattr_full_name(handler, name);
3997 	return simple_xattr_get(&info->xattrs, name, buffer, size);
3998 }
3999 
4000 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4001 				   struct mnt_idmap *idmap,
4002 				   struct dentry *unused, struct inode *inode,
4003 				   const char *name, const void *value,
4004 				   size_t size, int flags)
4005 {
4006 	struct shmem_inode_info *info = SHMEM_I(inode);
4007 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4008 	struct simple_xattr *old_xattr;
4009 	size_t ispace = 0;
4010 
4011 	name = xattr_full_name(handler, name);
4012 	if (value && sbinfo->max_inodes) {
4013 		ispace = simple_xattr_space(name, size);
4014 		raw_spin_lock(&sbinfo->stat_lock);
4015 		if (sbinfo->free_ispace < ispace)
4016 			ispace = 0;
4017 		else
4018 			sbinfo->free_ispace -= ispace;
4019 		raw_spin_unlock(&sbinfo->stat_lock);
4020 		if (!ispace)
4021 			return -ENOSPC;
4022 	}
4023 
4024 	old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
4025 	if (!IS_ERR(old_xattr)) {
4026 		ispace = 0;
4027 		if (old_xattr && sbinfo->max_inodes)
4028 			ispace = simple_xattr_space(old_xattr->name,
4029 						    old_xattr->size);
4030 		simple_xattr_free(old_xattr);
4031 		old_xattr = NULL;
4032 		inode_set_ctime_current(inode);
4033 		inode_inc_iversion(inode);
4034 	}
4035 	if (ispace) {
4036 		raw_spin_lock(&sbinfo->stat_lock);
4037 		sbinfo->free_ispace += ispace;
4038 		raw_spin_unlock(&sbinfo->stat_lock);
4039 	}
4040 	return PTR_ERR(old_xattr);
4041 }
4042 
4043 static const struct xattr_handler shmem_security_xattr_handler = {
4044 	.prefix = XATTR_SECURITY_PREFIX,
4045 	.get = shmem_xattr_handler_get,
4046 	.set = shmem_xattr_handler_set,
4047 };
4048 
4049 static const struct xattr_handler shmem_trusted_xattr_handler = {
4050 	.prefix = XATTR_TRUSTED_PREFIX,
4051 	.get = shmem_xattr_handler_get,
4052 	.set = shmem_xattr_handler_set,
4053 };
4054 
4055 static const struct xattr_handler shmem_user_xattr_handler = {
4056 	.prefix = XATTR_USER_PREFIX,
4057 	.get = shmem_xattr_handler_get,
4058 	.set = shmem_xattr_handler_set,
4059 };
4060 
4061 static const struct xattr_handler * const shmem_xattr_handlers[] = {
4062 	&shmem_security_xattr_handler,
4063 	&shmem_trusted_xattr_handler,
4064 	&shmem_user_xattr_handler,
4065 	NULL
4066 };
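
/*
 * With the handlers above in place, the ordinary xattr syscalls work on
 * tmpfs; an illustrative call (the path is assumed to be on a tmpfs
 * mount):
 *
 *	setxattr("/mnt/file", "user.comment", "hello", 5, 0);
 */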
4067 
4068 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4069 {
4070 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4071 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
4072 }
4073 #endif /* CONFIG_TMPFS_XATTR */
4074 
4075 static const struct inode_operations shmem_short_symlink_operations = {
4076 	.getattr	= shmem_getattr,
4077 	.setattr	= shmem_setattr,
4078 	.get_link	= simple_get_link,
4079 #ifdef CONFIG_TMPFS_XATTR
4080 	.listxattr	= shmem_listxattr,
4081 #endif
4082 };
4083 
4084 static const struct inode_operations shmem_symlink_inode_operations = {
4085 	.getattr	= shmem_getattr,
4086 	.setattr	= shmem_setattr,
4087 	.get_link	= shmem_get_link,
4088 #ifdef CONFIG_TMPFS_XATTR
4089 	.listxattr	= shmem_listxattr,
4090 #endif
4091 };
4092 
4093 static struct dentry *shmem_get_parent(struct dentry *child)
4094 {
4095 	return ERR_PTR(-ESTALE);
4096 }
4097 
4098 static int shmem_match(struct inode *ino, void *vfh)
4099 {
4100 	__u32 *fh = vfh;
4101 	__u64 inum = fh[2];
4102 	inum = (inum << 32) | fh[1];
4103 	return ino->i_ino == inum && fh[0] == ino->i_generation;
4104 }
4105 
4106 /* Find any alias of inode, but prefer a hashed alias */
4107 static struct dentry *shmem_find_alias(struct inode *inode)
4108 {
4109 	struct dentry *alias = d_find_alias(inode);
4110 
4111 	return alias ?: d_find_any_alias(inode);
4112 }
4113 
4114 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4115 		struct fid *fid, int fh_len, int fh_type)
4116 {
4117 	struct inode *inode;
4118 	struct dentry *dentry = NULL;
4119 	u64 inum;
4120 
4121 	if (fh_len < 3)
4122 		return NULL;
4123 
4124 	inum = fid->raw[2];
4125 	inum = (inum << 32) | fid->raw[1];
4126 
4127 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4128 			shmem_match, fid->raw);
4129 	if (inode) {
4130 		dentry = shmem_find_alias(inode);
4131 		iput(inode);
4132 	}
4133 
4134 	return dentry;
4135 }
4136 
4137 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4138 				struct inode *parent)
4139 {
4140 	if (*len < 3) {
4141 		*len = 3;
4142 		return FILEID_INVALID;
4143 	}
4144 
4145 	if (inode_unhashed(inode)) {
4146 		/* Unfortunately insert_inode_hash is not idempotent,
4147 		 * so as we hash inodes here rather than at creation
4148 		 * time, we need a lock to ensure we only try
4149 		 * to do it once
4150 		 */
4151 		static DEFINE_SPINLOCK(lock);
4152 		spin_lock(&lock);
4153 		if (inode_unhashed(inode))
4154 			__insert_inode_hash(inode,
4155 					    inode->i_ino + inode->i_generation);
4156 		spin_unlock(&lock);
4157 	}
4158 
4159 	fh[0] = inode->i_generation;
4160 	fh[1] = inode->i_ino;
4161 	fh[2] = ((__u64)inode->i_ino) >> 32;
4162 
4163 	*len = 3;
4164 	return 1;
4165 }
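
/*
 * The file handle encoded above is three 32-bit words: fh[0] is the
 * inode generation, fh[1] and fh[2] the low and high halves of the
 * inode number.  name_to_handle_at(2)/open_by_handle_at(2) and NFS
 * export of tmpfs exercise this path.
 */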
4166 
4167 static const struct export_operations shmem_export_ops = {
4168 	.get_parent     = shmem_get_parent,
4169 	.encode_fh      = shmem_encode_fh,
4170 	.fh_to_dentry	= shmem_fh_to_dentry,
4171 };
4172 
4173 enum shmem_param {
4174 	Opt_gid,
4175 	Opt_huge,
4176 	Opt_mode,
4177 	Opt_mpol,
4178 	Opt_nr_blocks,
4179 	Opt_nr_inodes,
4180 	Opt_size,
4181 	Opt_uid,
4182 	Opt_inode32,
4183 	Opt_inode64,
4184 	Opt_noswap,
4185 	Opt_quota,
4186 	Opt_usrquota,
4187 	Opt_grpquota,
4188 	Opt_usrquota_block_hardlimit,
4189 	Opt_usrquota_inode_hardlimit,
4190 	Opt_grpquota_block_hardlimit,
4191 	Opt_grpquota_inode_hardlimit,
4192 };
4193 
4194 static const struct constant_table shmem_param_enums_huge[] = {
4195 	{"never",	SHMEM_HUGE_NEVER },
4196 	{"always",	SHMEM_HUGE_ALWAYS },
4197 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
4198 	{"advise",	SHMEM_HUGE_ADVISE },
4199 	{}
4200 };
4201 
4202 const struct fs_parameter_spec shmem_fs_parameters[] = {
4203 	fsparam_gid   ("gid",		Opt_gid),
4204 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
4205 	fsparam_u32oct("mode",		Opt_mode),
4206 	fsparam_string("mpol",		Opt_mpol),
4207 	fsparam_string("nr_blocks",	Opt_nr_blocks),
4208 	fsparam_string("nr_inodes",	Opt_nr_inodes),
4209 	fsparam_string("size",		Opt_size),
4210 	fsparam_uid   ("uid",		Opt_uid),
4211 	fsparam_flag  ("inode32",	Opt_inode32),
4212 	fsparam_flag  ("inode64",	Opt_inode64),
4213 	fsparam_flag  ("noswap",	Opt_noswap),
4214 #ifdef CONFIG_TMPFS_QUOTA
4215 	fsparam_flag  ("quota",		Opt_quota),
4216 	fsparam_flag  ("usrquota",	Opt_usrquota),
4217 	fsparam_flag  ("grpquota",	Opt_grpquota),
4218 	fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4219 	fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4220 	fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4221 	fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4222 #endif
4223 	{}
4224 };
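
/*
 * Illustrative mount combining several of the parameters above ("/mnt"
 * is just an example mountpoint):
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,huge=within_size \
 *		tmpfs /mnt
 *
 * "size" accepts a percentage of physical RAM as well as the usual
 * k/m/g suffixes; both forms are handled by shmem_parse_one() below.
 */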
4225 
4226 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4227 {
4228 	struct shmem_options *ctx = fc->fs_private;
4229 	struct fs_parse_result result;
4230 	unsigned long long size;
4231 	char *rest;
4232 	int opt;
4233 	kuid_t kuid;
4234 	kgid_t kgid;
4235 
4236 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4237 	if (opt < 0)
4238 		return opt;
4239 
4240 	switch (opt) {
4241 	case Opt_size:
4242 		size = memparse(param->string, &rest);
4243 		if (*rest == '%') {
4244 			size <<= PAGE_SHIFT;
4245 			size *= totalram_pages();
4246 			do_div(size, 100);
4247 			rest++;
4248 		}
4249 		if (*rest)
4250 			goto bad_value;
4251 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4252 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4253 		break;
4254 	case Opt_nr_blocks:
4255 		ctx->blocks = memparse(param->string, &rest);
4256 		if (*rest || ctx->blocks > LONG_MAX)
4257 			goto bad_value;
4258 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4259 		break;
4260 	case Opt_nr_inodes:
4261 		ctx->inodes = memparse(param->string, &rest);
4262 		if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4263 			goto bad_value;
4264 		ctx->seen |= SHMEM_SEEN_INODES;
4265 		break;
4266 	case Opt_mode:
4267 		ctx->mode = result.uint_32 & 07777;
4268 		break;
4269 	case Opt_uid:
4270 		kuid = result.uid;
4271 
4272 		/*
4273 		 * The requested uid must be representable in the
4274 		 * filesystem's idmapping.
4275 		 */
4276 		if (!kuid_has_mapping(fc->user_ns, kuid))
4277 			goto bad_value;
4278 
4279 		ctx->uid = kuid;
4280 		break;
4281 	case Opt_gid:
4282 		kgid = result.gid;
4283 
4284 		/*
4285 		 * The requested gid must be representable in the
4286 		 * filesystem's idmapping.
4287 		 */
4288 		if (!kgid_has_mapping(fc->user_ns, kgid))
4289 			goto bad_value;
4290 
4291 		ctx->gid = kgid;
4292 		break;
4293 	case Opt_huge:
4294 		ctx->huge = result.uint_32;
4295 		if (ctx->huge != SHMEM_HUGE_NEVER &&
4296 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4297 		      has_transparent_hugepage()))
4298 			goto unsupported_parameter;
4299 		ctx->seen |= SHMEM_SEEN_HUGE;
4300 		break;
4301 	case Opt_mpol:
4302 		if (IS_ENABLED(CONFIG_NUMA)) {
4303 			mpol_put(ctx->mpol);
4304 			ctx->mpol = NULL;
4305 			if (mpol_parse_str(param->string, &ctx->mpol))
4306 				goto bad_value;
4307 			break;
4308 		}
4309 		goto unsupported_parameter;
4310 	case Opt_inode32:
4311 		ctx->full_inums = false;
4312 		ctx->seen |= SHMEM_SEEN_INUMS;
4313 		break;
4314 	case Opt_inode64:
4315 		if (sizeof(ino_t) < 8) {
4316 			return invalfc(fc,
4317 				       "Cannot use inode64 with <64bit inums in kernel\n");
4318 		}
4319 		ctx->full_inums = true;
4320 		ctx->seen |= SHMEM_SEEN_INUMS;
4321 		break;
4322 	case Opt_noswap:
4323 		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4324 			return invalfc(fc,
4325 				       "Turning off swap in unprivileged tmpfs mounts unsupported");
4326 		}
4327 		ctx->noswap = true;
4328 		ctx->seen |= SHMEM_SEEN_NOSWAP;
4329 		break;
4330 	case Opt_quota:
4331 		if (fc->user_ns != &init_user_ns)
4332 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4333 		ctx->seen |= SHMEM_SEEN_QUOTA;
4334 		ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4335 		break;
4336 	case Opt_usrquota:
4337 		if (fc->user_ns != &init_user_ns)
4338 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4339 		ctx->seen |= SHMEM_SEEN_QUOTA;
4340 		ctx->quota_types |= QTYPE_MASK_USR;
4341 		break;
4342 	case Opt_grpquota:
4343 		if (fc->user_ns != &init_user_ns)
4344 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4345 		ctx->seen |= SHMEM_SEEN_QUOTA;
4346 		ctx->quota_types |= QTYPE_MASK_GRP;
4347 		break;
4348 	case Opt_usrquota_block_hardlimit:
4349 		size = memparse(param->string, &rest);
4350 		if (*rest || !size)
4351 			goto bad_value;
4352 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4353 			return invalfc(fc,
4354 				       "User quota block hardlimit too large.");
4355 		ctx->qlimits.usrquota_bhardlimit = size;
4356 		break;
4357 	case Opt_grpquota_block_hardlimit:
4358 		size = memparse(param->string, &rest);
4359 		if (*rest || !size)
4360 			goto bad_value;
4361 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4362 			return invalfc(fc,
4363 				       "Group quota block hardlimit too large.");
4364 		ctx->qlimits.grpquota_bhardlimit = size;
4365 		break;
4366 	case Opt_usrquota_inode_hardlimit:
4367 		size = memparse(param->string, &rest);
4368 		if (*rest || !size)
4369 			goto bad_value;
4370 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4371 			return invalfc(fc,
4372 				       "User quota inode hardlimit too large.");
4373 		ctx->qlimits.usrquota_ihardlimit = size;
4374 		break;
4375 	case Opt_grpquota_inode_hardlimit:
4376 		size = memparse(param->string, &rest);
4377 		if (*rest || !size)
4378 			goto bad_value;
4379 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4380 			return invalfc(fc,
4381 				       "Group quota inode hardlimit too large.");
4382 		ctx->qlimits.grpquota_ihardlimit = size;
4383 		break;
4384 	}
4385 	return 0;
4386 
4387 unsupported_parameter:
4388 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
4389 bad_value:
4390 	return invalfc(fc, "Bad value for '%s'", param->key);
4391 }
4392 
4393 static int shmem_parse_options(struct fs_context *fc, void *data)
4394 {
4395 	char *options = data;
4396 
4397 	if (options) {
4398 		int err = security_sb_eat_lsm_opts(options, &fc->security);
4399 		if (err)
4400 			return err;
4401 	}
4402 
4403 	while (options != NULL) {
4404 		char *this_char = options;
4405 		for (;;) {
4406 			/*
4407 			 * NUL-terminate this option: unfortunately,
4408 			 * mount options form a comma-separated list,
4409 			 * but mpol's nodelist may also contain commas.
4410 			 */
4411 			options = strchr(options, ',');
4412 			if (options == NULL)
4413 				break;
4414 			options++;
4415 			if (!isdigit(*options)) {
4416 				options[-1] = '\0';
4417 				break;
4418 			}
4419 		}
4420 		if (*this_char) {
4421 			char *value = strchr(this_char, '=');
4422 			size_t len = 0;
4423 			int err;
4424 
4425 			if (value) {
4426 				*value++ = '\0';
4427 				len = strlen(value);
4428 			}
4429 			err = vfs_parse_fs_string(fc, this_char, value, len);
4430 			if (err < 0)
4431 				return err;
4432 		}
4433 	}
4434 	return 0;
4435 }
4436 
4437 /*
4438  * Reconfigure a shmem filesystem.
4439  */
4440 static int shmem_reconfigure(struct fs_context *fc)
4441 {
4442 	struct shmem_options *ctx = fc->fs_private;
4443 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4444 	unsigned long used_isp;
4445 	struct mempolicy *mpol = NULL;
4446 	const char *err;
4447 
4448 	raw_spin_lock(&sbinfo->stat_lock);
4449 	used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4450 
4451 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4452 		if (!sbinfo->max_blocks) {
4453 			err = "Cannot retroactively limit size";
4454 			goto out;
4455 		}
4456 		if (percpu_counter_compare(&sbinfo->used_blocks,
4457 					   ctx->blocks) > 0) {
4458 			err = "Too small a size for current use";
4459 			goto out;
4460 		}
4461 	}
4462 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4463 		if (!sbinfo->max_inodes) {
4464 			err = "Cannot retroactively limit inodes";
4465 			goto out;
4466 		}
4467 		if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4468 			err = "Too few inodes for current use";
4469 			goto out;
4470 		}
4471 	}
4472 
4473 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4474 	    sbinfo->next_ino > UINT_MAX) {
4475 		err = "Current inum too high to switch to 32-bit inums";
4476 		goto out;
4477 	}
4478 	if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4479 		err = "Cannot disable swap on remount";
4480 		goto out;
4481 	}
4482 	if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4483 		err = "Cannot enable swap on remount if it was disabled on first mount";
4484 		goto out;
4485 	}
4486 
4487 	if (ctx->seen & SHMEM_SEEN_QUOTA &&
4488 	    !sb_any_quota_loaded(fc->root->d_sb)) {
4489 		err = "Cannot enable quota on remount";
4490 		goto out;
4491 	}
4492 
4493 #ifdef CONFIG_TMPFS_QUOTA
4494 #define CHANGED_LIMIT(name)						\
4495 	(ctx->qlimits.name## hardlimit &&				\
4496 	(ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4497 
4498 	if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4499 	    CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4500 		err = "Cannot change global quota limit on remount";
4501 		goto out;
4502 	}
4503 #endif /* CONFIG_TMPFS_QUOTA */
4504 
4505 	if (ctx->seen & SHMEM_SEEN_HUGE)
4506 		sbinfo->huge = ctx->huge;
4507 	if (ctx->seen & SHMEM_SEEN_INUMS)
4508 		sbinfo->full_inums = ctx->full_inums;
4509 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
4510 		sbinfo->max_blocks  = ctx->blocks;
4511 	if (ctx->seen & SHMEM_SEEN_INODES) {
4512 		sbinfo->max_inodes  = ctx->inodes;
4513 		sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4514 	}
4515 
4516 	/*
4517 	 * Preserve previous mempolicy unless mpol remount option was specified.
4518 	 */
4519 	if (ctx->mpol) {
4520 		mpol = sbinfo->mpol;
4521 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
4522 		ctx->mpol = NULL;
4523 	}
4524 
4525 	if (ctx->noswap)
4526 		sbinfo->noswap = true;
4527 
4528 	raw_spin_unlock(&sbinfo->stat_lock);
4529 	mpol_put(mpol);
4530 	return 0;
4531 out:
4532 	raw_spin_unlock(&sbinfo->stat_lock);
4533 	return invalfc(fc, "%s", err);
4534 }
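
/*
 * Remount sketch (illustrative): a live tmpfs can be resized with e.g.
 *
 *	mount -o remount,size=2G /mnt
 *
 * subject to the checks above: an initially unlimited instance cannot
 * retroactively be given limits, and new limits must not fall below
 * current usage.
 */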
4535 
4536 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4537 {
4538 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4539 	struct mempolicy *mpol;
4540 
4541 	if (sbinfo->max_blocks != shmem_default_max_blocks())
4542 		seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4543 	if (sbinfo->max_inodes != shmem_default_max_inodes())
4544 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4545 	if (sbinfo->mode != (0777 | S_ISVTX))
4546 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4547 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4548 		seq_printf(seq, ",uid=%u",
4549 				from_kuid_munged(&init_user_ns, sbinfo->uid));
4550 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4551 		seq_printf(seq, ",gid=%u",
4552 				from_kgid_munged(&init_user_ns, sbinfo->gid));
4553 
4554 	/*
4555 	 * Showing inode{64,32} might be useful even if it's the system default,
4556 	 * since then people don't have to resort to checking both here and
4557 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
4558 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4559 	 *
4560 	 * We hide it when inode64 isn't the default and we are using 32-bit
4561 	 * inodes, since that probably just means the feature isn't even under
4562 	 * consideration.
4563 	 *
4564 	 * As such:
4565 	 *
4566 	 *                     +-----------------+-----------------+
4567 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
4568 	 *  +------------------+-----------------+-----------------+
4569 	 *  | full_inums=true  | show            | show            |
4570 	 *  | full_inums=false | show            | hide            |
4571 	 *  +------------------+-----------------+-----------------+
4572 	 *
4573 	 */
4574 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4575 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4576 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4577 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4578 	if (sbinfo->huge)
4579 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4580 #endif
4581 	mpol = shmem_get_sbmpol(sbinfo);
4582 	shmem_show_mpol(seq, mpol);
4583 	mpol_put(mpol);
4584 	if (sbinfo->noswap)
4585 		seq_printf(seq, ",noswap");
4586 #ifdef CONFIG_TMPFS_QUOTA
4587 	if (sb_has_quota_active(root->d_sb, USRQUOTA))
4588 		seq_printf(seq, ",usrquota");
4589 	if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4590 		seq_printf(seq, ",grpquota");
4591 	if (sbinfo->qlimits.usrquota_bhardlimit)
4592 		seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4593 			   sbinfo->qlimits.usrquota_bhardlimit);
4594 	if (sbinfo->qlimits.grpquota_bhardlimit)
4595 		seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4596 			   sbinfo->qlimits.grpquota_bhardlimit);
4597 	if (sbinfo->qlimits.usrquota_ihardlimit)
4598 		seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4599 			   sbinfo->qlimits.usrquota_ihardlimit);
4600 	if (sbinfo->qlimits.grpquota_ihardlimit)
4601 		seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4602 			   sbinfo->qlimits.grpquota_ihardlimit);
4603 #endif
4604 	return 0;
4605 }
4606 
4607 #endif /* CONFIG_TMPFS */
4608 
4609 static void shmem_put_super(struct super_block *sb)
4610 {
4611 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4612 
4613 #ifdef CONFIG_TMPFS_QUOTA
4614 	shmem_disable_quotas(sb);
4615 #endif
4616 	free_percpu(sbinfo->ino_batch);
4617 	percpu_counter_destroy(&sbinfo->used_blocks);
4618 	mpol_put(sbinfo->mpol);
4619 	kfree(sbinfo);
4620 	sb->s_fs_info = NULL;
4621 }
4622 
4623 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4624 {
4625 	struct shmem_options *ctx = fc->fs_private;
4626 	struct inode *inode;
4627 	struct shmem_sb_info *sbinfo;
4628 	int error = -ENOMEM;
4629 
4630 	/* Round up to L1_CACHE_BYTES to resist false sharing */
4631 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4632 				L1_CACHE_BYTES), GFP_KERNEL);
4633 	if (!sbinfo)
4634 		return error;
4635 
4636 	sb->s_fs_info = sbinfo;
4637 
4638 #ifdef CONFIG_TMPFS
4639 	/*
4640 	 * By default we only allow half of the physical RAM per
4641 	 * tmpfs instance, limiting inodes to one per page of lowmem;
4642 	 * but the internal instance is left unlimited.
4643 	 */
4644 	if (!(sb->s_flags & SB_KERNMOUNT)) {
4645 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
4646 			ctx->blocks = shmem_default_max_blocks();
4647 		if (!(ctx->seen & SHMEM_SEEN_INODES))
4648 			ctx->inodes = shmem_default_max_inodes();
4649 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
4650 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
4651 		sbinfo->noswap = ctx->noswap;
4652 	} else {
4653 		sb->s_flags |= SB_NOUSER;
4654 	}
4655 	sb->s_export_op = &shmem_export_ops;
4656 	sb->s_flags |= SB_NOSEC | SB_I_VERSION;
4657 #else
4658 	sb->s_flags |= SB_NOUSER;
4659 #endif
4660 	sbinfo->max_blocks = ctx->blocks;
4661 	sbinfo->max_inodes = ctx->inodes;
4662 	sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
4663 	if (sb->s_flags & SB_KERNMOUNT) {
4664 		sbinfo->ino_batch = alloc_percpu(ino_t);
4665 		if (!sbinfo->ino_batch)
4666 			goto failed;
4667 	}
4668 	sbinfo->uid = ctx->uid;
4669 	sbinfo->gid = ctx->gid;
4670 	sbinfo->full_inums = ctx->full_inums;
4671 	sbinfo->mode = ctx->mode;
4672 	sbinfo->huge = ctx->huge;
4673 	sbinfo->mpol = ctx->mpol;
4674 	ctx->mpol = NULL;
4675 
4676 	raw_spin_lock_init(&sbinfo->stat_lock);
4677 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
4678 		goto failed;
4679 	spin_lock_init(&sbinfo->shrinklist_lock);
4680 	INIT_LIST_HEAD(&sbinfo->shrinklist);
4681 
4682 	sb->s_maxbytes = MAX_LFS_FILESIZE;
4683 	sb->s_blocksize = PAGE_SIZE;
4684 	sb->s_blocksize_bits = PAGE_SHIFT;
4685 	sb->s_magic = TMPFS_MAGIC;
4686 	sb->s_op = &shmem_ops;
4687 	sb->s_time_gran = 1;
4688 #ifdef CONFIG_TMPFS_XATTR
4689 	sb->s_xattr = shmem_xattr_handlers;
4690 #endif
4691 #ifdef CONFIG_TMPFS_POSIX_ACL
4692 	sb->s_flags |= SB_POSIXACL;
4693 #endif
4694 	uuid_t uuid;
4695 	uuid_gen(&uuid);
4696 	super_set_uuid(sb, uuid.b, sizeof(uuid));
4697 
4698 #ifdef CONFIG_TMPFS_QUOTA
4699 	if (ctx->seen & SHMEM_SEEN_QUOTA) {
4700 		sb->dq_op = &shmem_quota_operations;
4701 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
4702 		sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
4703 
4704 		/* Copy the default limits from ctx into sbinfo */
4705 		memcpy(&sbinfo->qlimits, &ctx->qlimits,
4706 		       sizeof(struct shmem_quota_limits));
4707 
4708 		if (shmem_enable_quotas(sb, ctx->quota_types))
4709 			goto failed;
4710 	}
4711 #endif /* CONFIG_TMPFS_QUOTA */
4712 
4713 	inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
4714 				S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
4715 	if (IS_ERR(inode)) {
4716 		error = PTR_ERR(inode);
4717 		goto failed;
4718 	}
4719 	inode->i_uid = sbinfo->uid;
4720 	inode->i_gid = sbinfo->gid;
4721 	sb->s_root = d_make_root(inode);
4722 	if (!sb->s_root)
4723 		goto failed;
4724 	return 0;
4725 
4726 failed:
4727 	shmem_put_super(sb);
4728 	return error;
4729 }
4730 
4731 static int shmem_get_tree(struct fs_context *fc)
4732 {
4733 	return get_tree_nodev(fc, shmem_fill_super);
4734 }
4735 
4736 static void shmem_free_fc(struct fs_context *fc)
4737 {
4738 	struct shmem_options *ctx = fc->fs_private;
4739 
4740 	if (ctx) {
4741 		mpol_put(ctx->mpol);
4742 		kfree(ctx);
4743 	}
4744 }
4745 
4746 static const struct fs_context_operations shmem_fs_context_ops = {
4747 	.free			= shmem_free_fc,
4748 	.get_tree		= shmem_get_tree,
4749 #ifdef CONFIG_TMPFS
4750 	.parse_monolithic	= shmem_parse_options,
4751 	.parse_param		= shmem_parse_one,
4752 	.reconfigure		= shmem_reconfigure,
4753 #endif
4754 };
4755 
4756 static struct kmem_cache *shmem_inode_cachep __ro_after_init;
4757 
4758 static struct inode *shmem_alloc_inode(struct super_block *sb)
4759 {
4760 	struct shmem_inode_info *info;
4761 	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
4762 	if (!info)
4763 		return NULL;
4764 	return &info->vfs_inode;
4765 }
4766 
4767 static void shmem_free_in_core_inode(struct inode *inode)
4768 {
4769 	if (S_ISLNK(inode->i_mode))
4770 		kfree(inode->i_link);
4771 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
4772 }
4773 
4774 static void shmem_destroy_inode(struct inode *inode)
4775 {
4776 	if (S_ISREG(inode->i_mode))
4777 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
4778 	if (S_ISDIR(inode->i_mode))
4779 		simple_offset_destroy(shmem_get_offset_ctx(inode));
4780 }
4781 
4782 static void shmem_init_inode(void *foo)
4783 {
4784 	struct shmem_inode_info *info = foo;
4785 	inode_init_once(&info->vfs_inode);
4786 }
4787 
4788 static void __init shmem_init_inodecache(void)
4789 {
4790 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
4791 				sizeof(struct shmem_inode_info),
4792 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
4793 }
4794 
4795 static void __init shmem_destroy_inodecache(void)
4796 {
4797 	kmem_cache_destroy(shmem_inode_cachep);
4798 }
4799 
4800 /* Keep the page in page cache instead of truncating it */
4801 static int shmem_error_remove_folio(struct address_space *mapping,
4802 				   struct folio *folio)
4803 {
4804 	return 0;
4805 }
4806 
4807 static const struct address_space_operations shmem_aops = {
4808 	.writepage	= shmem_writepage,
4809 	.dirty_folio	= noop_dirty_folio,
4810 #ifdef CONFIG_TMPFS
4811 	.write_begin	= shmem_write_begin,
4812 	.write_end	= shmem_write_end,
4813 #endif
4814 #ifdef CONFIG_MIGRATION
4815 	.migrate_folio	= migrate_folio,
4816 #endif
4817 	.error_remove_folio = shmem_error_remove_folio,
4818 };
4819 
4820 static const struct file_operations shmem_file_operations = {
4821 	.mmap		= shmem_mmap,
4822 	.open		= shmem_file_open,
4823 	.get_unmapped_area = shmem_get_unmapped_area,
4824 #ifdef CONFIG_TMPFS
4825 	.llseek		= shmem_file_llseek,
4826 	.read_iter	= shmem_file_read_iter,
4827 	.write_iter	= shmem_file_write_iter,
4828 	.fsync		= noop_fsync,
4829 	.splice_read	= shmem_file_splice_read,
4830 	.splice_write	= iter_file_splice_write,
4831 	.fallocate	= shmem_fallocate,
4832 #endif
4833 };
4834 
4835 static const struct inode_operations shmem_inode_operations = {
4836 	.getattr	= shmem_getattr,
4837 	.setattr	= shmem_setattr,
4838 #ifdef CONFIG_TMPFS_XATTR
4839 	.listxattr	= shmem_listxattr,
4840 	.set_acl	= simple_set_acl,
4841 	.fileattr_get	= shmem_fileattr_get,
4842 	.fileattr_set	= shmem_fileattr_set,
4843 #endif
4844 };
4845 
4846 static const struct inode_operations shmem_dir_inode_operations = {
4847 #ifdef CONFIG_TMPFS
4848 	.getattr	= shmem_getattr,
4849 	.create		= shmem_create,
4850 	.lookup		= simple_lookup,
4851 	.link		= shmem_link,
4852 	.unlink		= shmem_unlink,
4853 	.symlink	= shmem_symlink,
4854 	.mkdir		= shmem_mkdir,
4855 	.rmdir		= shmem_rmdir,
4856 	.mknod		= shmem_mknod,
4857 	.rename		= shmem_rename2,
4858 	.tmpfile	= shmem_tmpfile,
4859 	.get_offset_ctx	= shmem_get_offset_ctx,
4860 #endif
4861 #ifdef CONFIG_TMPFS_XATTR
4862 	.listxattr	= shmem_listxattr,
4863 	.fileattr_get	= shmem_fileattr_get,
4864 	.fileattr_set	= shmem_fileattr_set,
4865 #endif
4866 #ifdef CONFIG_TMPFS_POSIX_ACL
4867 	.setattr	= shmem_setattr,
4868 	.set_acl	= simple_set_acl,
4869 #endif
4870 };
4871 
4872 static const struct inode_operations shmem_special_inode_operations = {
4873 	.getattr	= shmem_getattr,
4874 #ifdef CONFIG_TMPFS_XATTR
4875 	.listxattr	= shmem_listxattr,
4876 #endif
4877 #ifdef CONFIG_TMPFS_POSIX_ACL
4878 	.setattr	= shmem_setattr,
4879 	.set_acl	= simple_set_acl,
4880 #endif
4881 };
4882 
4883 static const struct super_operations shmem_ops = {
4884 	.alloc_inode	= shmem_alloc_inode,
4885 	.free_inode	= shmem_free_in_core_inode,
4886 	.destroy_inode	= shmem_destroy_inode,
4887 #ifdef CONFIG_TMPFS
4888 	.statfs		= shmem_statfs,
4889 	.show_options	= shmem_show_options,
4890 #endif
4891 #ifdef CONFIG_TMPFS_QUOTA
4892 	.get_dquots	= shmem_get_dquots,
4893 #endif
4894 	.evict_inode	= shmem_evict_inode,
4895 	.drop_inode	= generic_delete_inode,
4896 	.put_super	= shmem_put_super,
4897 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4898 	.nr_cached_objects	= shmem_unused_huge_count,
4899 	.free_cached_objects	= shmem_unused_huge_scan,
4900 #endif
4901 };
4902 
4903 static const struct vm_operations_struct shmem_vm_ops = {
4904 	.fault		= shmem_fault,
4905 	.map_pages	= filemap_map_pages,
4906 #ifdef CONFIG_NUMA
4907 	.set_policy     = shmem_set_policy,
4908 	.get_policy     = shmem_get_policy,
4909 #endif
4910 };
4911 
4912 static const struct vm_operations_struct shmem_anon_vm_ops = {
4913 	.fault		= shmem_fault,
4914 	.map_pages	= filemap_map_pages,
4915 #ifdef CONFIG_NUMA
4916 	.set_policy     = shmem_set_policy,
4917 	.get_policy     = shmem_get_policy,
4918 #endif
4919 };
4920 
4921 int shmem_init_fs_context(struct fs_context *fc)
4922 {
4923 	struct shmem_options *ctx;
4924 
4925 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4926 	if (!ctx)
4927 		return -ENOMEM;
4928 
4929 	ctx->mode = 0777 | S_ISVTX;
4930 	ctx->uid = current_fsuid();
4931 	ctx->gid = current_fsgid();
4932 
4933 	fc->fs_private = ctx;
4934 	fc->ops = &shmem_fs_context_ops;
4935 	return 0;
4936 }
4937 
4938 static struct file_system_type shmem_fs_type = {
4939 	.owner		= THIS_MODULE,
4940 	.name		= "tmpfs",
4941 	.init_fs_context = shmem_init_fs_context,
4942 #ifdef CONFIG_TMPFS
4943 	.parameters	= shmem_fs_parameters,
4944 #endif
4945 	.kill_sb	= kill_litter_super,
4946 	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
4947 };
4948 
4949 void __init shmem_init(void)
4950 {
4951 	int error;
4952 
4953 	shmem_init_inodecache();
4954 
4955 #ifdef CONFIG_TMPFS_QUOTA
4956 	register_quota_format(&shmem_quota_format);
4957 #endif
4958 
4959 	error = register_filesystem(&shmem_fs_type);
4960 	if (error) {
4961 		pr_err("Could not register tmpfs\n");
4962 		goto out2;
4963 	}
4964 
4965 	shm_mnt = kern_mount(&shmem_fs_type);
4966 	if (IS_ERR(shm_mnt)) {
4967 		error = PTR_ERR(shm_mnt);
4968 		pr_err("Could not kern_mount tmpfs\n");
4969 		goto out1;
4970 	}
4971 
4972 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4973 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4974 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4975 	else
4976 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
4977 
4978 	/*
4979 	 * Default to setting PMD-sized THP to inherit the global setting and
4980 	 * disable all other multi-size THPs.
4981 	 */
4982 	huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
4983 #endif
4984 	return;
4985 
4986 out1:
4987 	unregister_filesystem(&shmem_fs_type);
4988 out2:
4989 #ifdef CONFIG_TMPFS_QUOTA
4990 	unregister_quota_format(&shmem_quota_format);
4991 #endif
4992 	shmem_destroy_inodecache();
4993 	shm_mnt = ERR_PTR(error);
4994 }
4995 
4996 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4997 static ssize_t shmem_enabled_show(struct kobject *kobj,
4998 				  struct kobj_attribute *attr, char *buf)
4999 {
5000 	static const int values[] = {
5001 		SHMEM_HUGE_ALWAYS,
5002 		SHMEM_HUGE_WITHIN_SIZE,
5003 		SHMEM_HUGE_ADVISE,
5004 		SHMEM_HUGE_NEVER,
5005 		SHMEM_HUGE_DENY,
5006 		SHMEM_HUGE_FORCE,
5007 	};
5008 	int len = 0;
5009 	int i;
5010 
5011 	for (i = 0; i < ARRAY_SIZE(values); i++) {
5012 		len += sysfs_emit_at(buf, len,
5013 				shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5014 				i ? " " : "", shmem_format_huge(values[i]));
5015 	}
5016 	len += sysfs_emit_at(buf, len, "\n");
5017 
5018 	return len;
5019 }
5020 
5021 static ssize_t shmem_enabled_store(struct kobject *kobj,
5022 		struct kobj_attribute *attr, const char *buf, size_t count)
5023 {
5024 	char tmp[16];
5025 	int huge;
5026 
5027 	if (count + 1 > sizeof(tmp))
5028 		return -EINVAL;
5029 	memcpy(tmp, buf, count);
5030 	tmp[count] = '\0';
5031 	if (count && tmp[count - 1] == '\n')
5032 		tmp[count - 1] = '\0';
5033 
5034 	huge = shmem_parse_huge(tmp);
5035 	if (huge == -EINVAL)
5036 		return -EINVAL;
5037 	if (!has_transparent_hugepage() &&
5038 			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
5039 		return -EINVAL;
5040 
5041 	/* Do not override huge allocation policy with non-PMD sized mTHP */
5042 	if (huge == SHMEM_HUGE_FORCE &&
5043 	    huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
5044 		return -EINVAL;
5045 
5046 	shmem_huge = huge;
5047 	if (shmem_huge > SHMEM_HUGE_DENY)
5048 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5049 	return count;
5050 }
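
/*
 * The knob defined above is exposed as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled; an illustrative
 * setting:
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * "deny" and "force" are testing overrides applied to the internal
 * shm mount.
 */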
5051 
5052 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
5053 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5054 
5055 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5056 					  struct kobj_attribute *attr, char *buf)
5057 {
5058 	int order = to_thpsize(kobj)->order;
5059 	const char *output;
5060 
5061 	if (test_bit(order, &huge_shmem_orders_always))
5062 		output = "[always] inherit within_size advise never";
5063 	else if (test_bit(order, &huge_shmem_orders_inherit))
5064 		output = "always [inherit] within_size advise never";
5065 	else if (test_bit(order, &huge_shmem_orders_within_size))
5066 		output = "always inherit [within_size] advise never";
5067 	else if (test_bit(order, &huge_shmem_orders_madvise))
5068 		output = "always inherit within_size [advise] never";
5069 	else
5070 		output = "always inherit within_size advise [never]";
5071 
5072 	return sysfs_emit(buf, "%s\n", output);
5073 }
5074 
5075 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5076 					   struct kobj_attribute *attr,
5077 					   const char *buf, size_t count)
5078 {
5079 	int order = to_thpsize(kobj)->order;
5080 	ssize_t ret = count;
5081 
5082 	if (sysfs_streq(buf, "always")) {
5083 		spin_lock(&huge_shmem_orders_lock);
5084 		clear_bit(order, &huge_shmem_orders_inherit);
5085 		clear_bit(order, &huge_shmem_orders_madvise);
5086 		clear_bit(order, &huge_shmem_orders_within_size);
5087 		set_bit(order, &huge_shmem_orders_always);
5088 		spin_unlock(&huge_shmem_orders_lock);
5089 	} else if (sysfs_streq(buf, "inherit")) {
5090 		/* Do not override huge allocation policy with non-PMD sized mTHP */
5091 		if (shmem_huge == SHMEM_HUGE_FORCE &&
5092 		    order != HPAGE_PMD_ORDER)
5093 			return -EINVAL;
5094 
5095 		spin_lock(&huge_shmem_orders_lock);
5096 		clear_bit(order, &huge_shmem_orders_always);
5097 		clear_bit(order, &huge_shmem_orders_madvise);
5098 		clear_bit(order, &huge_shmem_orders_within_size);
5099 		set_bit(order, &huge_shmem_orders_inherit);
5100 		spin_unlock(&huge_shmem_orders_lock);
5101 	} else if (sysfs_streq(buf, "within_size")) {
5102 		spin_lock(&huge_shmem_orders_lock);
5103 		clear_bit(order, &huge_shmem_orders_always);
5104 		clear_bit(order, &huge_shmem_orders_inherit);
5105 		clear_bit(order, &huge_shmem_orders_madvise);
5106 		set_bit(order, &huge_shmem_orders_within_size);
5107 		spin_unlock(&huge_shmem_orders_lock);
5108 	} else if (sysfs_streq(buf, "advise")) {
5109 		spin_lock(&huge_shmem_orders_lock);
5110 		clear_bit(order, &huge_shmem_orders_always);
5111 		clear_bit(order, &huge_shmem_orders_inherit);
5112 		clear_bit(order, &huge_shmem_orders_within_size);
5113 		set_bit(order, &huge_shmem_orders_madvise);
5114 		spin_unlock(&huge_shmem_orders_lock);
5115 	} else if (sysfs_streq(buf, "never")) {
5116 		spin_lock(&huge_shmem_orders_lock);
5117 		clear_bit(order, &huge_shmem_orders_always);
5118 		clear_bit(order, &huge_shmem_orders_inherit);
5119 		clear_bit(order, &huge_shmem_orders_within_size);
5120 		clear_bit(order, &huge_shmem_orders_madvise);
5121 		spin_unlock(&huge_shmem_orders_lock);
5122 	} else {
5123 		ret = -EINVAL;
5124 	}
5125 
5126 	return ret;
5127 }
5128 
5129 struct kobj_attribute thpsize_shmem_enabled_attr =
5130 	__ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
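
/*
 * Editor's note (illustrative sketch): each supported mTHP size exposes its
 * own copy of this attribute as
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/shmem_enabled;
 * the 64kB size below is an example and depends on the architecture:
 *
 *	echo inherit > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled
 *	cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled
 *	always [inherit] within_size advise never
 */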
5131 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
5132 
5133 #else /* !CONFIG_SHMEM */
5134 
5135 /*
5136  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5137  *
5138  * This is intended for small systems where the benefits of the full
5139  * shmem code (swap-backed and resource-limited) are outweighed by
5140  * their complexity. On systems without swap this code should be
5141  * effectively equivalent, but much lighter weight.
5142  */
5143 
5144 static struct file_system_type shmem_fs_type = {
5145 	.name		= "tmpfs",
5146 	.init_fs_context = ramfs_init_fs_context,
5147 	.parameters	= ramfs_fs_parameters,
5148 	.kill_sb	= ramfs_kill_sb,
5149 	.fs_flags	= FS_USERNS_MOUNT,
5150 };
5151 
5152 void __init shmem_init(void)
5153 {
5154 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
5155 
5156 	shm_mnt = kern_mount(&shmem_fs_type);
5157 	BUG_ON(IS_ERR(shm_mnt));
5158 }
5159 
5160 int shmem_unuse(unsigned int type)
5161 {
5162 	return 0;
5163 }
5164 
5165 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
5166 {
5167 	return 0;
5168 }
5169 
5170 void shmem_unlock_mapping(struct address_space *mapping)
5171 {
5172 }
5173 
5174 #ifdef CONFIG_MMU
5175 unsigned long shmem_get_unmapped_area(struct file *file,
5176 				      unsigned long addr, unsigned long len,
5177 				      unsigned long pgoff, unsigned long flags)
5178 {
5179 	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
5180 }
5181 #endif
5182 
5183 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5184 {
5185 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5186 }
5187 EXPORT_SYMBOL_GPL(shmem_truncate_range);
5188 
5189 #define shmem_vm_ops				generic_file_vm_ops
5190 #define shmem_anon_vm_ops			generic_file_vm_ops
5191 #define shmem_file_operations			ramfs_file_operations
5192 #define shmem_acct_size(flags, size)		0
5193 #define shmem_unacct_size(flags, size)		do {} while (0)
5194 
5195 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
5196 				struct super_block *sb, struct inode *dir,
5197 				umode_t mode, dev_t dev, unsigned long flags)
5198 {
5199 	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
5200 	return inode ? inode : ERR_PTR(-ENOSPC);
5201 }
5202 
5203 #endif /* CONFIG_SHMEM */
5204 
5205 /* common code */
5206 
5207 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
5208 			loff_t size, unsigned long flags, unsigned int i_flags)
5209 {
5210 	struct inode *inode;
5211 	struct file *res;
5212 
5213 	if (IS_ERR(mnt))
5214 		return ERR_CAST(mnt);
5215 
5216 	if (size < 0 || size > MAX_LFS_FILESIZE)
5217 		return ERR_PTR(-EINVAL);
5218 
5219 	if (shmem_acct_size(flags, size))
5220 		return ERR_PTR(-ENOMEM);
5221 
5222 	if (is_idmapped_mnt(mnt))
5223 		return ERR_PTR(-EINVAL);
5224 
5225 	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
5226 				S_IFREG | S_IRWXUGO, 0, flags);
5227 	if (IS_ERR(inode)) {
5228 		shmem_unacct_size(flags, size);
5229 		return ERR_CAST(inode);
5230 	}
5231 	inode->i_flags |= i_flags;
5232 	inode->i_size = size;
5233 	clear_nlink(inode);	/* It is unlinked */
5234 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
5235 	if (!IS_ERR(res))
5236 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
5237 				&shmem_file_operations);
5238 	if (IS_ERR(res))
5239 		iput(inode);
5240 	return res;
5241 }
5242 
5243 /**
5244  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5245  * 	kernel internal.  There will be NO LSM permission checks against the
5246  * 	underlying inode.  So users of this interface must do LSM checks at a
5247  *	higher layer.  The users are the big_key and shm implementations.  LSM
5248  *	checks are provided at the key or shm level rather than the inode.
5249  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5250  * @size: size to be set for the file
5251  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5252  */
5253 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
5254 {
5255 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
5256 }
5257 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
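
/*
 * Editor's sketch: a kernel-internal caller such as the SysV shm code
 * creates its backing file roughly like this; the name and accounting
 * flag are illustrative:
 *
 *	struct file *file;
 *
 *	file = shmem_kernel_file_setup("SYSV00000000", size, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	fput(file);	(drop the reference when done)
 */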
5258 
5259 /**
5260  * shmem_file_setup - get an unlinked file living in tmpfs
5261  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5262  * @size: size to be set for the file
5263  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5264  */
5265 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
5266 {
5267 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
5268 }
5269 EXPORT_SYMBOL_GPL(shmem_file_setup);
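
/*
 * Editor's sketch: a memfd-style caller obtains an unlinked tmpfs file;
 * the name "example-buf" and SZ_1M size are illustrative:
 *
 *	struct file *file = shmem_file_setup("example-buf", SZ_1M, 0);
 *
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */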
5270 
5271 /**
5272  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5273  * @mnt: the tmpfs mount where the file will be created
5274  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5275  * @size: size to be set for the file
5276  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5277  */
5278 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
5279 				       loff_t size, unsigned long flags)
5280 {
5281 	return __shmem_file_setup(mnt, name, size, flags, 0);
5282 }
5283 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
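
/*
 * Editor's sketch: a driver with a private tmpfs mount (as i915 does with
 * its gemfs) can place files on that mount instead of the global shm_mnt;
 * "my_mnt" here is a hypothetical vfsmount obtained earlier by the driver:
 *
 *	file = shmem_file_setup_with_mnt(my_mnt, "gem-object", size, VM_NORESERVE);
 */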
5284 
5285 /**
5286  * shmem_zero_setup - setup a shared anonymous mapping
5287  * @vma: the vma to be mmapped is prepared by do_mmap
5288  */
5289 int shmem_zero_setup(struct vm_area_struct *vma)
5290 {
5291 	struct file *file;
5292 	loff_t size = vma->vm_end - vma->vm_start;
5293 
5294 	/*
5295 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
5296 	 * between XFS directory reading and selinux: since this file is only
5297 	 * accessible to the user through its mapping, use the S_PRIVATE flag to
5298 	 * bypass file security, in the same way as shmem_kernel_file_setup().
5299 	 */
5300 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
5301 	if (IS_ERR(file))
5302 		return PTR_ERR(file);
5303 
5304 	if (vma->vm_file)
5305 		fput(vma->vm_file);
5306 	vma->vm_file = file;
5307 	vma->vm_ops = &shmem_anon_vm_ops;
5308 
5309 	return 0;
5310 }
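
/*
 * Editor's note: from userspace, a shared anonymous mapping is what
 * ultimately reaches shmem_zero_setup() via do_mmap():
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 */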
5311 
5312 /**
5313  * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5314  * @mapping:	the folio's address_space
5315  * @index:	the folio index
5316  * @gfp:	the page allocator flags to use if allocating
5317  *
5318  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
5319  * with any new page allocations done using the specified allocation flags.
5320  * But read_cache_page_gfp() uses the ->read_folio() method, which does not
5321  * suit tmpfs, since it may have pages in swapcache and needs to find those
5322  * for itself; drivers/gpu/drm's i915 and ttm rely upon this support.
5323  *
5324  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
5325  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
5326  */
5327 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
5328 		pgoff_t index, gfp_t gfp)
5329 {
5330 #ifdef CONFIG_SHMEM
5331 	struct inode *inode = mapping->host;
5332 	struct folio *folio;
5333 	int error;
5334 
5335 	error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
5336 				    gfp, NULL, NULL);
5337 	if (error)
5338 		return ERR_PTR(error);
5339 
5340 	folio_unlock(folio);
5341 	return folio;
5342 #else
5343 	/*
5344 	 * The tiny !SHMEM case uses ramfs without swap
5345 	 */
5346 	return mapping_read_folio_gfp(mapping, index, gfp);
5347 #endif
5348 }
5349 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
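
/*
 * Editor's sketch: a driver pulling folios from a shmem mapping with the
 * relaxed allocation semantics described above; "mapping" and "index" are
 * assumed to come from the caller:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
 *
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	...
 *	folio_put(folio);	(drop the reference when done)
 */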
5350 
5351 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
5352 					 pgoff_t index, gfp_t gfp)
5353 {
5354 	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
5355 	struct page *page;
5356 
5357 	if (IS_ERR(folio))
5358 		return &folio->page;
5359 
5360 	page = folio_file_page(folio, index);
5361 	if (PageHWPoison(page)) {
5362 		folio_put(folio);
5363 		return ERR_PTR(-EIO);
5364 	}
5365 
5366 	return page;
5367 }
5368 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
5369