xref: /linux/mm/shmem.c (revision 537d196186e0a0ce28e494ca1881885accc35a12)
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *		 2000 Transmeta Corp.
6  *		 2000-2001 Christoph Rohland
7  *		 2000-2001 SAP AG
8  *		 2002 Red Hat Inc.
9  * Copyright (C) 2002-2011 Hugh Dickins.
10  * Copyright (C) 2011 Google Inc.
11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
13  *
14  * Extended attribute support for tmpfs:
15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
17  *
18  * tiny-shmem:
19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20  *
21  * This file is released under the GPL.
22  */
23 
24 #include <linux/fs.h>
25 #include <linux/init.h>
26 #include <linux/vfs.h>
27 #include <linux/mount.h>
28 #include <linux/ramfs.h>
29 #include <linux/pagemap.h>
30 #include <linux/file.h>
31 #include <linux/fileattr.h>
32 #include <linux/mm.h>
33 #include <linux/random.h>
34 #include <linux/sched/signal.h>
35 #include <linux/export.h>
36 #include <linux/shmem_fs.h>
37 #include <linux/swap.h>
38 #include <linux/uio.h>
39 #include <linux/hugetlb.h>
40 #include <linux/fs_parser.h>
41 #include <linux/swapfile.h>
42 #include <linux/iversion.h>
43 #include <linux/unicode.h>
44 #include "swap.h"
45 
46 static struct vfsmount *shm_mnt __ro_after_init;
47 
48 #ifdef CONFIG_SHMEM
49 /*
50  * This virtual memory filesystem is heavily based on the ramfs. It
51  * extends ramfs by the ability to use swap and honor resource limits
52  * which makes it a completely usable filesystem.
53  */
54 
55 #include <linux/xattr.h>
56 #include <linux/exportfs.h>
57 #include <linux/posix_acl.h>
58 #include <linux/posix_acl_xattr.h>
59 #include <linux/mman.h>
60 #include <linux/string.h>
61 #include <linux/slab.h>
62 #include <linux/backing-dev.h>
63 #include <linux/writeback.h>
64 #include <linux/pagevec.h>
65 #include <linux/percpu_counter.h>
66 #include <linux/falloc.h>
67 #include <linux/splice.h>
68 #include <linux/security.h>
69 #include <linux/swapops.h>
70 #include <linux/mempolicy.h>
71 #include <linux/namei.h>
72 #include <linux/ctype.h>
73 #include <linux/migrate.h>
74 #include <linux/highmem.h>
75 #include <linux/seq_file.h>
76 #include <linux/magic.h>
77 #include <linux/syscalls.h>
78 #include <linux/fcntl.h>
79 #include <uapi/linux/memfd.h>
80 #include <linux/rmap.h>
81 #include <linux/uuid.h>
82 #include <linux/quotaops.h>
83 #include <linux/rcupdate_wait.h>
84 
85 #include <linux/uaccess.h>
86 
87 #include "internal.h"
88 
89 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
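/*
 * A quick worked example of the unit above (illustrative only, assuming
 * PAGE_SIZE == 4096, i.e. PAGE_SHIFT == 12): VM_ACCT(1) == 1 and
 * VM_ACCT(4096) == 1, since even a single byte is charged as a whole page,
 * while VM_ACCT(5000) == 2 because PAGE_ALIGN(5000) == 8192.
 */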
90 
91 /* Pretend that each entry is of this size in directory's i_size */
92 #define BOGO_DIRENT_SIZE 20
93 
94 /* Pretend that one inode + its dentry occupy this much memory */
95 #define BOGO_INODE_SIZE 1024
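/*
 * An illustration of the two pretend sizes above: each name linked into a
 * tmpfs directory grows that directory's i_size by BOGO_DIRENT_SIZE bytes,
 * and each inode reserved in shmem_reserve_inode() below charges
 * BOGO_INODE_SIZE bytes against the mount's free_ispace budget (bookkeeping
 * approximations, not measurements of real memory use).
 */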
96 
97 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98 #define SHORT_SYMLINK_LEN 128
99 
100 /*
101  * shmem_fallocate communicates with shmem_fault or shmem_writeout via
102  * inode->i_private (with i_rwsem making sure that it has only one user at
103  * a time): we would prefer not to enlarge the shmem inode just for that.
104  */
105 struct shmem_falloc {
106 	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
107 	pgoff_t start;		/* start of range currently being fallocated */
108 	pgoff_t next;		/* the next page offset to be fallocated */
109 	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
110 	pgoff_t nr_unswapped;	/* how often writeout refused to swap out */
111 };
112 
113 struct shmem_options {
114 	unsigned long long blocks;
115 	unsigned long long inodes;
116 	struct mempolicy *mpol;
117 	kuid_t uid;
118 	kgid_t gid;
119 	umode_t mode;
120 	bool full_inums;
121 	int huge;
122 	int seen;
123 	bool noswap;
124 	unsigned short quota_types;
125 	struct shmem_quota_limits qlimits;
126 #if IS_ENABLED(CONFIG_UNICODE)
127 	struct unicode_map *encoding;
128 	bool strict_encoding;
129 #endif
130 #define SHMEM_SEEN_BLOCKS 1
131 #define SHMEM_SEEN_INODES 2
132 #define SHMEM_SEEN_HUGE 4
133 #define SHMEM_SEEN_INUMS 8
134 #define SHMEM_SEEN_NOSWAP 16
135 #define SHMEM_SEEN_QUOTA 32
136 };
137 
138 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
139 static unsigned long huge_shmem_orders_always __read_mostly;
140 static unsigned long huge_shmem_orders_madvise __read_mostly;
141 static unsigned long huge_shmem_orders_inherit __read_mostly;
142 static unsigned long huge_shmem_orders_within_size __read_mostly;
143 static bool shmem_orders_configured __initdata;
144 #endif
145 
146 #ifdef CONFIG_TMPFS
147 static unsigned long shmem_default_max_blocks(void)
148 {
149 	return totalram_pages() / 2;
150 }
151 
152 static unsigned long shmem_default_max_inodes(void)
153 {
154 	unsigned long nr_pages = totalram_pages();
155 
156 	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
157 			ULONG_MAX / BOGO_INODE_SIZE);
158 }
159 #endif
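/*
 * For a feel of the resulting defaults (a sketch, assuming a 64-bit machine
 * with 16 GiB of RAM, 4 KiB pages and no highmem): totalram_pages() is
 * roughly four million pages, so an unconfigured tmpfs mount gets max_blocks
 * of roughly two million pages (8 GiB, the familiar "size=50%" default), and
 * max_inodes is the smallest of nr_pages - totalhigh_pages(), nr_pages / 2
 * and ULONG_MAX / BOGO_INODE_SIZE, here also about two million.
 */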
160 
161 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
162 			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
163 			struct vm_area_struct *vma, vm_fault_t *fault_type);
164 
165 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
166 {
167 	return sb->s_fs_info;
168 }
169 
170 /*
171  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
172  * for shared memory and for shared anonymous (/dev/zero) mappings
173  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
174  * consistent with the pre-accounting of private mappings ...
175  */
176 static inline int shmem_acct_size(unsigned long flags, loff_t size)
177 {
178 	return (flags & VM_NORESERVE) ?
179 		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
180 }
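/*
 * A minimal sketch of that up-front charge (illustrative; shmem_file_setup()
 * is declared in <linux/shmem_fs.h>, the caller shown here is hypothetical,
 * and 4 KiB pages are assumed): creating a 1 MiB object with flags 0 charges
 * VM_ACCT(SZ_1M) == 256 pages against overcommit at setup time, whereas
 * passing VM_NORESERVE defers accounting to shmem_acct_blocks() below.
 *
 *	struct file *filp = shmem_file_setup("example", SZ_1M, 0);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 */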
181 
182 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
183 {
184 	if (!(flags & VM_NORESERVE))
185 		vm_unacct_memory(VM_ACCT(size));
186 }
187 
188 static inline int shmem_reacct_size(unsigned long flags,
189 		loff_t oldsize, loff_t newsize)
190 {
191 	if (!(flags & VM_NORESERVE)) {
192 		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
193 			return security_vm_enough_memory_mm(current->mm,
194 					VM_ACCT(newsize) - VM_ACCT(oldsize));
195 		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
196 			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
197 	}
198 	return 0;
199 }
200 
201 /*
202  * ... whereas tmpfs objects are accounted incrementally as
203  * pages are allocated, in order to allow large sparse files.
204  * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
205  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
206  */
207 static inline int shmem_acct_blocks(unsigned long flags, long pages)
208 {
209 	if (!(flags & VM_NORESERVE))
210 		return 0;
211 
212 	return security_vm_enough_memory_mm(current->mm,
213 			pages * VM_ACCT(PAGE_SIZE));
214 }
215 
216 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
217 {
218 	if (flags & VM_NORESERVE)
219 		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
220 }
221 
222 static int shmem_inode_acct_blocks(struct inode *inode, long pages)
223 {
224 	struct shmem_inode_info *info = SHMEM_I(inode);
225 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
226 	int err = -ENOSPC;
227 
228 	if (shmem_acct_blocks(info->flags, pages))
229 		return err;
230 
231 	might_sleep();	/* when quotas */
232 	if (sbinfo->max_blocks) {
233 		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
234 						sbinfo->max_blocks, pages))
235 			goto unacct;
236 
237 		err = dquot_alloc_block_nodirty(inode, pages);
238 		if (err) {
239 			percpu_counter_sub(&sbinfo->used_blocks, pages);
240 			goto unacct;
241 		}
242 	} else {
243 		err = dquot_alloc_block_nodirty(inode, pages);
244 		if (err)
245 			goto unacct;
246 	}
247 
248 	return 0;
249 
250 unacct:
251 	shmem_unacct_blocks(info->flags, pages);
252 	return err;
253 }
254 
255 static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
256 {
257 	struct shmem_inode_info *info = SHMEM_I(inode);
258 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
259 
260 	might_sleep();	/* when quotas */
261 	dquot_free_block_nodirty(inode, pages);
262 
263 	if (sbinfo->max_blocks)
264 		percpu_counter_sub(&sbinfo->used_blocks, pages);
265 	shmem_unacct_blocks(info->flags, pages);
266 }
267 
268 static const struct super_operations shmem_ops;
269 static const struct address_space_operations shmem_aops;
270 static const struct file_operations shmem_file_operations;
271 static const struct inode_operations shmem_inode_operations;
272 static const struct inode_operations shmem_dir_inode_operations;
273 static const struct inode_operations shmem_special_inode_operations;
274 static const struct vm_operations_struct shmem_vm_ops;
275 static const struct vm_operations_struct shmem_anon_vm_ops;
276 static struct file_system_type shmem_fs_type;
277 
278 bool shmem_mapping(const struct address_space *mapping)
279 {
280 	return mapping->a_ops == &shmem_aops;
281 }
282 EXPORT_SYMBOL_GPL(shmem_mapping);
283 
284 bool vma_is_anon_shmem(const struct vm_area_struct *vma)
285 {
286 	return vma->vm_ops == &shmem_anon_vm_ops;
287 }
288 
289 bool vma_is_shmem(const struct vm_area_struct *vma)
290 {
291 	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
292 }
293 
294 static LIST_HEAD(shmem_swaplist);
295 static DEFINE_SPINLOCK(shmem_swaplist_lock);
296 
297 #ifdef CONFIG_TMPFS_QUOTA
298 
299 static int shmem_enable_quotas(struct super_block *sb,
300 			       unsigned short quota_types)
301 {
302 	int type, err = 0;
303 
304 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
305 	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
306 		if (!(quota_types & (1 << type)))
307 			continue;
308 		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
309 					  DQUOT_USAGE_ENABLED |
310 					  DQUOT_LIMITS_ENABLED);
311 		if (err)
312 			goto out_err;
313 	}
314 	return 0;
315 
316 out_err:
317 	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
318 		type, err);
319 	for (type--; type >= 0; type--)
320 		dquot_quota_off(sb, type);
321 	return err;
322 }
323 
324 static void shmem_disable_quotas(struct super_block *sb)
325 {
326 	int type;
327 
328 	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
329 		dquot_quota_off(sb, type);
330 }
331 
332 static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
333 {
334 	return SHMEM_I(inode)->i_dquot;
335 }
336 #endif /* CONFIG_TMPFS_QUOTA */
337 
338 /*
339  * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
340  * produces a novel ino for the newly allocated inode.
341  *
342  * It may also be called when making a hard link to permit the space needed by
343  * each dentry. However, in that case, no new inode number is needed since that
344  * internally draws from another pool of inode numbers (currently global
345  * get_next_ino()). This case is indicated by passing NULL as inop.
346  */
347 #define SHMEM_INO_BATCH 1024
348 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
349 {
350 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
351 	ino_t ino;
352 
353 	if (!(sb->s_flags & SB_KERNMOUNT)) {
354 		raw_spin_lock(&sbinfo->stat_lock);
355 		if (sbinfo->max_inodes) {
356 			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
357 				raw_spin_unlock(&sbinfo->stat_lock);
358 				return -ENOSPC;
359 			}
360 			sbinfo->free_ispace -= BOGO_INODE_SIZE;
361 		}
362 		if (inop) {
363 			ino = sbinfo->next_ino++;
364 			if (unlikely(is_zero_ino(ino)))
365 				ino = sbinfo->next_ino++;
366 			if (unlikely(!sbinfo->full_inums &&
367 				     ino > UINT_MAX)) {
368 				/*
369 				 * Emulate get_next_ino uint wraparound for
370 				 * compatibility
371 				 */
372 				if (IS_ENABLED(CONFIG_64BIT))
373 					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
374 						__func__, MINOR(sb->s_dev));
375 				sbinfo->next_ino = 1;
376 				ino = sbinfo->next_ino++;
377 			}
378 			*inop = ino;
379 		}
380 		raw_spin_unlock(&sbinfo->stat_lock);
381 	} else if (inop) {
382 		/*
383 		 * __shmem_file_setup, one of our callers, is lock-free: it
384 		 * doesn't hold stat_lock in shmem_reserve_inode since
385 		 * max_inodes is always 0, and is called from potentially
386 		 * unknown contexts. As such, use a per-cpu batched allocator
387 		 * which doesn't require the per-sb stat_lock unless we are at
388 		 * the batch boundary.
389 		 *
390 		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
391 		 * shmem mounts are not exposed to userspace, so we don't need
392 		 * to worry about things like glibc compatibility.
393 		 */
394 		ino_t *next_ino;
395 
396 		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
397 		ino = *next_ino;
398 		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
399 			raw_spin_lock(&sbinfo->stat_lock);
400 			ino = sbinfo->next_ino;
401 			sbinfo->next_ino += SHMEM_INO_BATCH;
402 			raw_spin_unlock(&sbinfo->stat_lock);
403 			if (unlikely(is_zero_ino(ino)))
404 				ino++;
405 		}
406 		*inop = ino;
407 		*next_ino = ++ino;
408 		put_cpu();
409 	}
410 
411 	return 0;
412 }
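/*
 * The effect of the batching above, roughly: on an SB_KERNMOUNT each CPU
 * hands out inode numbers from its own window of SHMEM_INO_BATCH (1024)
 * values, so stat_lock is taken only once per 1024 allocations on that CPU
 * rather than for every file created on the kernel-internal mount.
 */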
413 
414 static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
415 {
416 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
417 	if (sbinfo->max_inodes) {
418 		raw_spin_lock(&sbinfo->stat_lock);
419 		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
420 		raw_spin_unlock(&sbinfo->stat_lock);
421 	}
422 }
423 
424 /**
425  * shmem_recalc_inode - recalculate the block usage of an inode
426  * @inode: inode to recalc
427  * @alloced: the change in number of pages allocated to inode
428  * @swapped: the change in number of pages swapped from inode
429  *
430  * We have to calculate the free blocks since the mm can drop
431  * undirtied hole pages behind our back.
432  *
433  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
434  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
435  *
436  * Return: true if swapped was incremented from 0, for shmem_writeout().
437  */
438 static bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
439 {
440 	struct shmem_inode_info *info = SHMEM_I(inode);
441 	bool first_swapped = false;
442 	long freed;
443 
444 	spin_lock(&info->lock);
445 	info->alloced += alloced;
446 	info->swapped += swapped;
447 	freed = info->alloced - info->swapped -
448 		READ_ONCE(inode->i_mapping->nrpages);
449 	/*
450 	 * Special case: whereas normally shmem_recalc_inode() is called
451 	 * after i_mapping->nrpages has already been adjusted (up or down),
452 	 * shmem_writeout() has to raise swapped before nrpages is lowered -
453 	 * to stop a racing shmem_recalc_inode() from thinking that a page has
454 	 * been freed.  Compensate here, to avoid the need for a followup call.
455 	 */
456 	if (swapped > 0) {
457 		if (info->swapped == swapped)
458 			first_swapped = true;
459 		freed += swapped;
460 	}
461 	if (freed > 0)
462 		info->alloced -= freed;
463 	spin_unlock(&info->lock);
464 
465 	/* The quota case may block */
466 	if (freed > 0)
467 		shmem_inode_unacct_blocks(inode, freed);
468 	return first_swapped;
469 }
470 
471 bool shmem_charge(struct inode *inode, long pages)
472 {
473 	struct address_space *mapping = inode->i_mapping;
474 
475 	if (shmem_inode_acct_blocks(inode, pages))
476 		return false;
477 
478 	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
479 	xa_lock_irq(&mapping->i_pages);
480 	mapping->nrpages += pages;
481 	xa_unlock_irq(&mapping->i_pages);
482 
483 	shmem_recalc_inode(inode, pages, 0);
484 	return true;
485 }
486 
487 void shmem_uncharge(struct inode *inode, long pages)
488 {
489 	/* pages argument is currently unused: keep it to help debugging */
490 	/* nrpages adjustment done by __filemap_remove_folio() or caller */
491 
492 	shmem_recalc_inode(inode, 0, 0);
493 }
494 
495 /*
496  * Replace item expected in xarray by a new item, while holding xa_lock.
497  */
498 static int shmem_replace_entry(struct address_space *mapping,
499 			pgoff_t index, void *expected, void *replacement)
500 {
501 	XA_STATE(xas, &mapping->i_pages, index);
502 	void *item;
503 
504 	VM_BUG_ON(!expected);
505 	VM_BUG_ON(!replacement);
506 	item = xas_load(&xas);
507 	if (item != expected)
508 		return -ENOENT;
509 	xas_store(&xas, replacement);
510 	return 0;
511 }
512 
513 /*
514  * Sometimes, before we decide whether to proceed or to fail, we must check
515  * that an entry was not already brought back or split by a racing thread.
516  *
517  * Checking folio is not enough: by the time a swapcache folio is locked, it
518  * might be reused, and again be swapcache, using the same swap as before.
519  * Returns the swap entry's order if it is still present, else returns -1.
520  */
521 static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index,
522 			      swp_entry_t swap)
523 {
524 	XA_STATE(xas, &mapping->i_pages, index);
525 	int ret = -1;
526 	void *entry;
527 
528 	rcu_read_lock();
529 	do {
530 		entry = xas_load(&xas);
531 		if (entry == swp_to_radix_entry(swap))
532 			ret = xas_get_order(&xas);
533 	} while (xas_retry(&xas, entry));
534 	rcu_read_unlock();
535 	return ret;
536 }
537 
538 /*
539  * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
540  *
541  * SHMEM_HUGE_NEVER:
542  *	disables huge pages for the mount;
543  * SHMEM_HUGE_ALWAYS:
544  *	enables huge pages for the mount;
545  * SHMEM_HUGE_WITHIN_SIZE:
546  *	only allocate huge pages if the page will be fully within i_size,
547  *	also respect madvise() hints;
548  * SHMEM_HUGE_ADVISE:
549  *	only allocate huge pages if requested with madvise();
550  */
551 
552 #define SHMEM_HUGE_NEVER	0
553 #define SHMEM_HUGE_ALWAYS	1
554 #define SHMEM_HUGE_WITHIN_SIZE	2
555 #define SHMEM_HUGE_ADVISE	3
556 
557 /*
558  * Special values.
559  * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
560  *
561  * SHMEM_HUGE_DENY:
562  *	disables huge on shm_mnt and all mounts, for emergency use;
563  * SHMEM_HUGE_FORCE:
564  *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
565  *
566  */
567 #define SHMEM_HUGE_DENY		(-1)
568 #define SHMEM_HUGE_FORCE	(-2)
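/*
 * For example (the mount point and size are purely illustrative), the
 * per-mount policy is chosen with the huge= mount option, while the special
 * values above come only from sysfs:
 *
 *	mount -t tmpfs -o size=1G,huge=within_size tmpfs /mnt/mytmpfs
 *	echo deny > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */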
569 
570 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
571 /* ifdef here to avoid bloating shmem.o when not necessary */
572 
573 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
574 static int tmpfs_huge __read_mostly = SHMEM_HUGE_NEVER;
575 
576 static unsigned int shmem_get_orders_within_size(struct inode *inode,
577 		unsigned long within_size_orders, pgoff_t index,
578 		loff_t write_end)
579 {
580 	pgoff_t aligned_index;
581 	unsigned long order;
582 	loff_t i_size;
583 
584 	order = highest_order(within_size_orders);
585 	while (within_size_orders) {
586 		aligned_index = round_up(index + 1, 1 << order);
587 		i_size = max(write_end, i_size_read(inode));
588 		i_size = round_up(i_size, PAGE_SIZE);
589 		if (i_size >> PAGE_SHIFT >= aligned_index)
590 			return within_size_orders;
591 
592 		order = next_order(&within_size_orders, order);
593 	}
594 
595 	return 0;
596 }
597 
598 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
599 					      loff_t write_end, bool shmem_huge_force,
600 					      struct vm_area_struct *vma,
601 					      vm_flags_t vm_flags)
602 {
603 	unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
604 		0 : BIT(HPAGE_PMD_ORDER);
605 	unsigned long within_size_orders;
606 
607 	if (!S_ISREG(inode->i_mode))
608 		return 0;
609 	if (shmem_huge == SHMEM_HUGE_DENY)
610 		return 0;
611 	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
612 		return maybe_pmd_order;
613 
614 	/*
615 	 * The huge order allocation for anon shmem is controlled through
616 	 * the mTHP interface, so we still use PMD-sized huge order to
617 	 * check whether global control is enabled.
618 	 *
619 	 * For tmpfs mmap()'s huge order, we still use PMD-sized order to
620 	 * allocate huge pages due to lack of a write size hint.
621 	 *
622 	 * For tmpfs with 'huge=always' or 'huge=within_size' mount option,
623 	 * we will always try PMD-sized order first. If that failed, it will
624 	 * fall back to small large folios.
625 	 */
626 	switch (SHMEM_SB(inode->i_sb)->huge) {
627 	case SHMEM_HUGE_ALWAYS:
628 		if (vma)
629 			return maybe_pmd_order;
630 
631 		return THP_ORDERS_ALL_FILE_DEFAULT;
632 	case SHMEM_HUGE_WITHIN_SIZE:
633 		if (vma)
634 			within_size_orders = maybe_pmd_order;
635 		else
636 			within_size_orders = THP_ORDERS_ALL_FILE_DEFAULT;
637 
638 		within_size_orders = shmem_get_orders_within_size(inode, within_size_orders,
639 								  index, write_end);
640 		if (within_size_orders > 0)
641 			return within_size_orders;
642 
643 		fallthrough;
644 	case SHMEM_HUGE_ADVISE:
645 		if (vm_flags & VM_HUGEPAGE)
646 			return maybe_pmd_order;
647 		fallthrough;
648 	default:
649 		return 0;
650 	}
651 }
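/*
 * Concretely (a sketch for x86-64 with 4 KiB pages, where HPAGE_PMD_ORDER is
 * 9, i.e. 2 MiB, and does not exceed MAX_PAGECACHE_ORDER): with huge=always a
 * fault through a vma is offered only BIT(9), while a write() path with no
 * vma may use any order in THP_ORDERS_ALL_FILE_DEFAULT; huge=within_size
 * offers the same orders, but only when the rounded-up i_size covers the
 * whole would-be folio.
 */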
652 
653 static int shmem_parse_huge(const char *str)
654 {
655 	int huge;
656 
657 	if (!str)
658 		return -EINVAL;
659 
660 	if (!strcmp(str, "never"))
661 		huge = SHMEM_HUGE_NEVER;
662 	else if (!strcmp(str, "always"))
663 		huge = SHMEM_HUGE_ALWAYS;
664 	else if (!strcmp(str, "within_size"))
665 		huge = SHMEM_HUGE_WITHIN_SIZE;
666 	else if (!strcmp(str, "advise"))
667 		huge = SHMEM_HUGE_ADVISE;
668 	else if (!strcmp(str, "deny"))
669 		huge = SHMEM_HUGE_DENY;
670 	else if (!strcmp(str, "force"))
671 		huge = SHMEM_HUGE_FORCE;
672 	else
673 		return -EINVAL;
674 
675 	if (!has_transparent_hugepage() &&
676 	    huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
677 		return -EINVAL;
678 
679 	/* Do not override huge allocation policy with non-PMD sized mTHP */
680 	if (huge == SHMEM_HUGE_FORCE &&
681 	    huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
682 		return -EINVAL;
683 
684 	return huge;
685 }
686 
687 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
688 static const char *shmem_format_huge(int huge)
689 {
690 	switch (huge) {
691 	case SHMEM_HUGE_NEVER:
692 		return "never";
693 	case SHMEM_HUGE_ALWAYS:
694 		return "always";
695 	case SHMEM_HUGE_WITHIN_SIZE:
696 		return "within_size";
697 	case SHMEM_HUGE_ADVISE:
698 		return "advise";
699 	case SHMEM_HUGE_DENY:
700 		return "deny";
701 	case SHMEM_HUGE_FORCE:
702 		return "force";
703 	default:
704 		VM_BUG_ON(1);
705 		return "bad_val";
706 	}
707 }
708 #endif
709 
710 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
711 		struct shrink_control *sc, unsigned long nr_to_free)
712 {
713 	LIST_HEAD(list), *pos, *next;
714 	struct inode *inode;
715 	struct shmem_inode_info *info;
716 	struct folio *folio;
717 	unsigned long batch = sc ? sc->nr_to_scan : 128;
718 	unsigned long split = 0, freed = 0;
719 
720 	if (list_empty(&sbinfo->shrinklist))
721 		return SHRINK_STOP;
722 
723 	spin_lock(&sbinfo->shrinklist_lock);
724 	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
725 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
726 
727 		/* pin the inode */
728 		inode = igrab(&info->vfs_inode);
729 
730 		/* inode is about to be evicted */
731 		if (!inode) {
732 			list_del_init(&info->shrinklist);
733 			goto next;
734 		}
735 
736 		list_move(&info->shrinklist, &list);
737 next:
738 		sbinfo->shrinklist_len--;
739 		if (!--batch)
740 			break;
741 	}
742 	spin_unlock(&sbinfo->shrinklist_lock);
743 
744 	list_for_each_safe(pos, next, &list) {
745 		pgoff_t next, end;
746 		loff_t i_size;
747 		int ret;
748 
749 		info = list_entry(pos, struct shmem_inode_info, shrinklist);
750 		inode = &info->vfs_inode;
751 
752 		if (nr_to_free && freed >= nr_to_free)
753 			goto move_back;
754 
755 		i_size = i_size_read(inode);
756 		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
757 		if (!folio || xa_is_value(folio))
758 			goto drop;
759 
760 		/* No large folio at the end of the file: nothing to split */
761 		if (!folio_test_large(folio)) {
762 			folio_put(folio);
763 			goto drop;
764 		}
765 
766 		/* Check if there is anything to gain from splitting */
767 		next = folio_next_index(folio);
768 		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
769 		if (end <= folio->index || end >= next) {
770 			folio_put(folio);
771 			goto drop;
772 		}
773 
774 		/*
775 		 * Move the inode on the list back to shrinklist if we failed
776 		 * to lock the page at this time.
777 		 *
778 		 * Waiting for the lock may lead to deadlock in the
779 		 * reclaim path.
780 		 */
781 		if (!folio_trylock(folio)) {
782 			folio_put(folio);
783 			goto move_back;
784 		}
785 
786 		ret = split_folio(folio);
787 		folio_unlock(folio);
788 		folio_put(folio);
789 
790 		/* If split failed move the inode on the list back to shrinklist */
791 		if (ret)
792 			goto move_back;
793 
794 		freed += next - end;
795 		split++;
796 drop:
797 		list_del_init(&info->shrinklist);
798 		goto put;
799 move_back:
800 		/*
801 		 * Make sure the inode is either on the global list or deleted
802 		 * from any local list before iput() since it could be deleted
803 		 * in another thread once we put the inode (then the local list
804 		 * is corrupted).
805 		 */
806 		spin_lock(&sbinfo->shrinklist_lock);
807 		list_move(&info->shrinklist, &sbinfo->shrinklist);
808 		sbinfo->shrinklist_len++;
809 		spin_unlock(&sbinfo->shrinklist_lock);
810 put:
811 		iput(inode);
812 	}
813 
814 	return split;
815 }
816 
817 static long shmem_unused_huge_scan(struct super_block *sb,
818 		struct shrink_control *sc)
819 {
820 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
821 
822 	if (!READ_ONCE(sbinfo->shrinklist_len))
823 		return SHRINK_STOP;
824 
825 	return shmem_unused_huge_shrink(sbinfo, sc, 0);
826 }
827 
828 static long shmem_unused_huge_count(struct super_block *sb,
829 		struct shrink_control *sc)
830 {
831 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
832 	return READ_ONCE(sbinfo->shrinklist_len);
833 }
834 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
835 
836 #define shmem_huge SHMEM_HUGE_DENY
837 
838 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
839 		struct shrink_control *sc, unsigned long nr_to_free)
840 {
841 	return 0;
842 }
843 
844 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
845 					      loff_t write_end, bool shmem_huge_force,
846 					      struct vm_area_struct *vma,
847 					      vm_flags_t vm_flags)
848 {
849 	return 0;
850 }
851 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
852 
853 static void shmem_update_stats(struct folio *folio, int nr_pages)
854 {
855 	if (folio_test_pmd_mappable(folio))
856 		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
857 	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
858 	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
859 }
860 
861 /*
862  * Somewhat like filemap_add_folio, but error if expected item has gone.
863  */
864 static int shmem_add_to_page_cache(struct folio *folio,
865 				   struct address_space *mapping,
866 				   pgoff_t index, void *expected, gfp_t gfp)
867 {
868 	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
869 	unsigned long nr = folio_nr_pages(folio);
870 	swp_entry_t iter, swap;
871 	void *entry;
872 
873 	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
874 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
875 	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
876 
877 	folio_ref_add(folio, nr);
878 	folio->mapping = mapping;
879 	folio->index = index;
880 
881 	gfp &= GFP_RECLAIM_MASK;
882 	folio_throttle_swaprate(folio, gfp);
883 	swap = radix_to_swp_entry(expected);
884 
885 	do {
886 		iter = swap;
887 		xas_lock_irq(&xas);
888 		xas_for_each_conflict(&xas, entry) {
889 			/*
890 			 * The range must either be empty, or filled with
891 			 * expected swap entries. Shmem swap entries are never
892 			 * partially freed without split of both entry and
893 			 * folio, so there shouldn't be any holes.
894 			 */
895 			if (!expected || entry != swp_to_radix_entry(iter)) {
896 				xas_set_err(&xas, -EEXIST);
897 				goto unlock;
898 			}
899 			iter.val += 1 << xas_get_order(&xas);
900 		}
901 		if (expected && iter.val - nr != swap.val) {
902 			xas_set_err(&xas, -EEXIST);
903 			goto unlock;
904 		}
905 		xas_store(&xas, folio);
906 		if (xas_error(&xas))
907 			goto unlock;
908 		shmem_update_stats(folio, nr);
909 		mapping->nrpages += nr;
910 unlock:
911 		xas_unlock_irq(&xas);
912 	} while (xas_nomem(&xas, gfp));
913 
914 	if (xas_error(&xas)) {
915 		folio->mapping = NULL;
916 		folio_ref_sub(folio, nr);
917 		return xas_error(&xas);
918 	}
919 
920 	return 0;
921 }
922 
923 /*
924  * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
925  */
926 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
927 {
928 	struct address_space *mapping = folio->mapping;
929 	long nr = folio_nr_pages(folio);
930 	int error;
931 
932 	xa_lock_irq(&mapping->i_pages);
933 	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
934 	folio->mapping = NULL;
935 	mapping->nrpages -= nr;
936 	shmem_update_stats(folio, -nr);
937 	xa_unlock_irq(&mapping->i_pages);
938 	folio_put_refs(folio, nr);
939 	BUG_ON(error);
940 }
941 
942 /*
943  * Remove swap entry from page cache, free the swap and its page cache. Returns
944  * the number of pages being freed. 0 means entry not found in XArray (0 pages
945  * the number of pages freed; 0 means the entry was not found in the XArray
946  * (so nothing was freed).
947 static long shmem_free_swap(struct address_space *mapping,
948 			    pgoff_t index, void *radswap)
949 {
950 	int order = xa_get_order(&mapping->i_pages, index);
951 	void *old;
952 
953 	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
954 	if (old != radswap)
955 		return 0;
956 	free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
957 
958 	return 1 << order;
959 }
960 
961 /*
962  * Determine (in bytes) how many of the shmem object's pages mapped by the
963  * given offsets are swapped out.
964  *
965  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
966  * as long as the inode doesn't go away and racy results are not a problem.
967  */
968 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
969 						pgoff_t start, pgoff_t end)
970 {
971 	XA_STATE(xas, &mapping->i_pages, start);
972 	struct folio *folio;
973 	unsigned long swapped = 0;
974 	unsigned long max = end - 1;
975 
976 	rcu_read_lock();
977 	xas_for_each(&xas, folio, max) {
978 		if (xas_retry(&xas, folio))
979 			continue;
980 		if (xa_is_value(folio))
981 			swapped += 1 << xas_get_order(&xas);
982 		if (xas.xa_index == max)
983 			break;
984 		if (need_resched()) {
985 			xas_pause(&xas);
986 			cond_resched_rcu();
987 		}
988 	}
989 	rcu_read_unlock();
990 
991 	return swapped << PAGE_SHIFT;
992 }
993 
994 /*
995  * Determine (in bytes) how many of the shmem object's pages mapped by the
996  * given vma is swapped out.
997  * given vma are swapped out.
998  * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
999  * as long as the inode doesn't go away and racy results are not a problem.
1000  */
1001 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
1002 {
1003 	struct inode *inode = file_inode(vma->vm_file);
1004 	struct shmem_inode_info *info = SHMEM_I(inode);
1005 	struct address_space *mapping = inode->i_mapping;
1006 	unsigned long swapped;
1007 
1008 	/* Be careful as we don't hold info->lock */
1009 	swapped = READ_ONCE(info->swapped);
1010 
1011 	/*
1012 	 * The easier cases are when the shmem object has nothing in swap, or
1013 	 * the vma maps it whole. Then we can simply use the stats that we
1014 	 * already track.
1015 	 */
1016 	if (!swapped)
1017 		return 0;
1018 
1019 	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
1020 		return swapped << PAGE_SHIFT;
1021 
1022 	/* Here comes the more involved part */
1023 	return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
1024 					vma->vm_pgoff + vma_pages(vma));
1025 }
1026 
1027 /*
1028  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
1029  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
1030 void shmem_unlock_mapping(struct address_space *mapping)
1031 {
1032 	struct folio_batch fbatch;
1033 	pgoff_t index = 0;
1034 
1035 	folio_batch_init(&fbatch);
1036 	/*
1037 	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
1038 	 */
1039 	while (!mapping_unevictable(mapping) &&
1040 	       filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
1041 		check_move_unevictable_folios(&fbatch);
1042 		folio_batch_release(&fbatch);
1043 		cond_resched();
1044 	}
1045 }
1046 
1047 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
1048 {
1049 	struct folio *folio;
1050 
1051 	/*
1052 	 * At first avoid shmem_get_folio(,,,SGP_READ): that fails
1053 	 * beyond i_size, and reports fallocated folios as holes.
1054 	 */
1055 	folio = filemap_get_entry(inode->i_mapping, index);
1056 	if (!folio)
1057 		return folio;
1058 	if (!xa_is_value(folio)) {
1059 		folio_lock(folio);
1060 		if (folio->mapping == inode->i_mapping)
1061 			return folio;
1062 		/* The folio has been swapped out */
1063 		folio_unlock(folio);
1064 		folio_put(folio);
1065 	}
1066 	/*
1067 	 * But read a folio back from swap if any of it is within i_size
1068 	 * (although in some cases this is just a waste of time).
1069 	 */
1070 	folio = NULL;
1071 	shmem_get_folio(inode, index, 0, &folio, SGP_READ);
1072 	return folio;
1073 }
1074 
1075 /*
1076  * Remove range of pages and swap entries from page cache, and free them.
1077  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
1078  */
1079 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
1080 								 bool unfalloc)
1081 {
1082 	struct address_space *mapping = inode->i_mapping;
1083 	struct shmem_inode_info *info = SHMEM_I(inode);
1084 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
1085 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
1086 	struct folio_batch fbatch;
1087 	pgoff_t indices[PAGEVEC_SIZE];
1088 	struct folio *folio;
1089 	bool same_folio;
1090 	long nr_swaps_freed = 0;
1091 	pgoff_t index;
1092 	int i;
1093 
1094 	if (lend == -1)
1095 		end = -1;	/* unsigned, so actually very big */
1096 
1097 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
1098 		info->fallocend = start;
1099 
1100 	folio_batch_init(&fbatch);
1101 	index = start;
1102 	while (index < end && find_lock_entries(mapping, &index, end - 1,
1103 			&fbatch, indices)) {
1104 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1105 			folio = fbatch.folios[i];
1106 
1107 			if (xa_is_value(folio)) {
1108 				if (unfalloc)
1109 					continue;
1110 				nr_swaps_freed += shmem_free_swap(mapping,
1111 							indices[i], folio);
1112 				continue;
1113 			}
1114 
1115 			if (!unfalloc || !folio_test_uptodate(folio))
1116 				truncate_inode_folio(mapping, folio);
1117 			folio_unlock(folio);
1118 		}
1119 		folio_batch_remove_exceptionals(&fbatch);
1120 		folio_batch_release(&fbatch);
1121 		cond_resched();
1122 	}
1123 
1124 	/*
1125 	 * When undoing a failed fallocate, we want none of the partial folio
1126 	 * zeroing and splitting below, but shall want to truncate the whole
1127 	 * folio when !uptodate indicates that it was added by this fallocate,
1128 	 * even when [lstart, lend] covers only a part of the folio.
1129 	 */
1130 	if (unfalloc)
1131 		goto whole_folios;
1132 
1133 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
1134 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
1135 	if (folio) {
1136 		same_folio = lend < folio_pos(folio) + folio_size(folio);
1137 		folio_mark_dirty(folio);
1138 		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
1139 			start = folio_next_index(folio);
1140 			if (same_folio)
1141 				end = folio->index;
1142 		}
1143 		folio_unlock(folio);
1144 		folio_put(folio);
1145 		folio = NULL;
1146 	}
1147 
1148 	if (!same_folio)
1149 		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
1150 	if (folio) {
1151 		folio_mark_dirty(folio);
1152 		if (!truncate_inode_partial_folio(folio, lstart, lend))
1153 			end = folio->index;
1154 		folio_unlock(folio);
1155 		folio_put(folio);
1156 	}
1157 
1158 whole_folios:
1159 
1160 	index = start;
1161 	while (index < end) {
1162 		cond_resched();
1163 
1164 		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
1165 				indices)) {
1166 			/* If all gone or hole-punch or unfalloc, we're done */
1167 			if (index == start || end != -1)
1168 				break;
1169 			/* But if truncating, restart to make sure all gone */
1170 			index = start;
1171 			continue;
1172 		}
1173 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
1174 			folio = fbatch.folios[i];
1175 
1176 			if (xa_is_value(folio)) {
1177 				long swaps_freed;
1178 
1179 				if (unfalloc)
1180 					continue;
1181 				swaps_freed = shmem_free_swap(mapping, indices[i], folio);
1182 				if (!swaps_freed) {
1183 					/* Swap was replaced by page: retry */
1184 					index = indices[i];
1185 					break;
1186 				}
1187 				nr_swaps_freed += swaps_freed;
1188 				continue;
1189 			}
1190 
1191 			folio_lock(folio);
1192 
1193 			if (!unfalloc || !folio_test_uptodate(folio)) {
1194 				if (folio_mapping(folio) != mapping) {
1195 					/* Page was replaced by swap: retry */
1196 					folio_unlock(folio);
1197 					index = indices[i];
1198 					break;
1199 				}
1200 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
1201 						folio);
1202 
1203 				if (!folio_test_large(folio)) {
1204 					truncate_inode_folio(mapping, folio);
1205 				} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
1206 					/*
1207 					 * If we split a page, reset the loop so
1208 					 * that we pick up the new sub pages.
1209 					 * Otherwise the THP was entirely
1210 					 * dropped or the target range was
1211 					 * zeroed, so just continue the loop as
1212 					 * is.
1213 					 */
1214 					if (!folio_test_large(folio)) {
1215 						folio_unlock(folio);
1216 						index = start;
1217 						break;
1218 					}
1219 				}
1220 			}
1221 			folio_unlock(folio);
1222 		}
1223 		folio_batch_remove_exceptionals(&fbatch);
1224 		folio_batch_release(&fbatch);
1225 	}
1226 
1227 	shmem_recalc_inode(inode, 0, -nr_swaps_freed);
1228 }
1229 
1230 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1231 {
1232 	shmem_undo_range(inode, lstart, lend, false);
1233 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1234 	inode_inc_iversion(inode);
1235 }
1236 EXPORT_SYMBOL_GPL(shmem_truncate_range);
1237 
1238 static int shmem_getattr(struct mnt_idmap *idmap,
1239 			 const struct path *path, struct kstat *stat,
1240 			 u32 request_mask, unsigned int query_flags)
1241 {
1242 	struct inode *inode = path->dentry->d_inode;
1243 	struct shmem_inode_info *info = SHMEM_I(inode);
1244 
1245 	if (info->alloced - info->swapped != inode->i_mapping->nrpages)
1246 		shmem_recalc_inode(inode, 0, 0);
1247 
1248 	if (info->fsflags & FS_APPEND_FL)
1249 		stat->attributes |= STATX_ATTR_APPEND;
1250 	if (info->fsflags & FS_IMMUTABLE_FL)
1251 		stat->attributes |= STATX_ATTR_IMMUTABLE;
1252 	if (info->fsflags & FS_NODUMP_FL)
1253 		stat->attributes |= STATX_ATTR_NODUMP;
1254 	stat->attributes_mask |= (STATX_ATTR_APPEND |
1255 			STATX_ATTR_IMMUTABLE |
1256 			STATX_ATTR_NODUMP);
1257 	generic_fillattr(idmap, request_mask, inode, stat);
1258 
1259 	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
1260 		stat->blksize = HPAGE_PMD_SIZE;
1261 
1262 	if (request_mask & STATX_BTIME) {
1263 		stat->result_mask |= STATX_BTIME;
1264 		stat->btime.tv_sec = info->i_crtime.tv_sec;
1265 		stat->btime.tv_nsec = info->i_crtime.tv_nsec;
1266 	}
1267 
1268 	return 0;
1269 }
1270 
1271 static int shmem_setattr(struct mnt_idmap *idmap,
1272 			 struct dentry *dentry, struct iattr *attr)
1273 {
1274 	struct inode *inode = d_inode(dentry);
1275 	struct shmem_inode_info *info = SHMEM_I(inode);
1276 	int error;
1277 	bool update_mtime = false;
1278 	bool update_ctime = true;
1279 
1280 	error = setattr_prepare(idmap, dentry, attr);
1281 	if (error)
1282 		return error;
1283 
1284 	if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) {
1285 		if ((inode->i_mode ^ attr->ia_mode) & 0111) {
1286 			return -EPERM;
1287 		}
1288 	}
1289 
1290 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1291 		loff_t oldsize = inode->i_size;
1292 		loff_t newsize = attr->ia_size;
1293 
1294 		/* protected by i_rwsem */
1295 		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1296 		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1297 			return -EPERM;
1298 
1299 		if (newsize != oldsize) {
1300 			error = shmem_reacct_size(SHMEM_I(inode)->flags,
1301 					oldsize, newsize);
1302 			if (error)
1303 				return error;
1304 			i_size_write(inode, newsize);
1305 			update_mtime = true;
1306 		} else {
1307 			update_ctime = false;
1308 		}
1309 		if (newsize <= oldsize) {
1310 			loff_t holebegin = round_up(newsize, PAGE_SIZE);
1311 			if (oldsize > holebegin)
1312 				unmap_mapping_range(inode->i_mapping,
1313 							holebegin, 0, 1);
1314 			if (info->alloced)
1315 				shmem_truncate_range(inode,
1316 							newsize, (loff_t)-1);
1317 			/* unmap again to remove racily COWed private pages */
1318 			if (oldsize > holebegin)
1319 				unmap_mapping_range(inode->i_mapping,
1320 							holebegin, 0, 1);
1321 		}
1322 	}
1323 
1324 	if (is_quota_modification(idmap, inode, attr)) {
1325 		error = dquot_initialize(inode);
1326 		if (error)
1327 			return error;
1328 	}
1329 
1330 	/* Transfer quota accounting */
1331 	if (i_uid_needs_update(idmap, attr, inode) ||
1332 	    i_gid_needs_update(idmap, attr, inode)) {
1333 		error = dquot_transfer(idmap, inode, attr);
1334 		if (error)
1335 			return error;
1336 	}
1337 
1338 	setattr_copy(idmap, inode, attr);
1339 	if (attr->ia_valid & ATTR_MODE)
1340 		error = posix_acl_chmod(idmap, dentry, inode->i_mode);
1341 	if (!error && update_ctime) {
1342 		inode_set_ctime_current(inode);
1343 		if (update_mtime)
1344 			inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
1345 		inode_inc_iversion(inode);
1346 	}
1347 	return error;
1348 }
1349 
1350 static void shmem_evict_inode(struct inode *inode)
1351 {
1352 	struct shmem_inode_info *info = SHMEM_I(inode);
1353 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1354 	size_t freed = 0;
1355 
1356 	if (shmem_mapping(inode->i_mapping)) {
1357 		shmem_unacct_size(info->flags, inode->i_size);
1358 		inode->i_size = 0;
1359 		mapping_set_exiting(inode->i_mapping);
1360 		shmem_truncate_range(inode, 0, (loff_t)-1);
1361 		if (!list_empty(&info->shrinklist)) {
1362 			spin_lock(&sbinfo->shrinklist_lock);
1363 			if (!list_empty(&info->shrinklist)) {
1364 				list_del_init(&info->shrinklist);
1365 				sbinfo->shrinklist_len--;
1366 			}
1367 			spin_unlock(&sbinfo->shrinklist_lock);
1368 		}
1369 		while (!list_empty(&info->swaplist)) {
1370 			/* Wait while shmem_unuse() is scanning this inode... */
1371 			wait_var_event(&info->stop_eviction,
1372 				       !atomic_read(&info->stop_eviction));
1373 			spin_lock(&shmem_swaplist_lock);
1374 			/* ...but beware of the race if we peeked too early */
1375 			if (!atomic_read(&info->stop_eviction))
1376 				list_del_init(&info->swaplist);
1377 			spin_unlock(&shmem_swaplist_lock);
1378 		}
1379 	}
1380 
1381 	simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
1382 	shmem_free_inode(inode->i_sb, freed);
1383 	WARN_ON(inode->i_blocks);
1384 	clear_inode(inode);
1385 #ifdef CONFIG_TMPFS_QUOTA
1386 	dquot_free_inode(inode);
1387 	dquot_drop(inode);
1388 #endif
1389 }
1390 
1391 static unsigned int shmem_find_swap_entries(struct address_space *mapping,
1392 				pgoff_t start, struct folio_batch *fbatch,
1393 				pgoff_t *indices, unsigned int type)
1394 {
1395 	XA_STATE(xas, &mapping->i_pages, start);
1396 	struct folio *folio;
1397 	swp_entry_t entry;
1398 
1399 	rcu_read_lock();
1400 	xas_for_each(&xas, folio, ULONG_MAX) {
1401 		if (xas_retry(&xas, folio))
1402 			continue;
1403 
1404 		if (!xa_is_value(folio))
1405 			continue;
1406 
1407 		entry = radix_to_swp_entry(folio);
1408 		/*
1409 		 * swapin error entries can be found in the mapping. But they're
1410 		 * deliberately ignored here as we've done everything we can do.
1411 		 */
1412 		if (swp_type(entry) != type)
1413 			continue;
1414 
1415 		indices[folio_batch_count(fbatch)] = xas.xa_index;
1416 		if (!folio_batch_add(fbatch, folio))
1417 			break;
1418 
1419 		if (need_resched()) {
1420 			xas_pause(&xas);
1421 			cond_resched_rcu();
1422 		}
1423 	}
1424 	rcu_read_unlock();
1425 
1426 	return folio_batch_count(fbatch);
1427 }
1428 
1429 /*
1430  * Move the swapped pages for an inode to page cache. Returns the count
1431  * of pages swapped in, or the error in case of failure.
1432  */
1433 static int shmem_unuse_swap_entries(struct inode *inode,
1434 		struct folio_batch *fbatch, pgoff_t *indices)
1435 {
1436 	int i = 0;
1437 	int ret = 0;
1438 	int error = 0;
1439 	struct address_space *mapping = inode->i_mapping;
1440 
1441 	for (i = 0; i < folio_batch_count(fbatch); i++) {
1442 		struct folio *folio = fbatch->folios[i];
1443 
1444 		error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
1445 					mapping_gfp_mask(mapping), NULL, NULL);
1446 		if (error == 0) {
1447 			folio_unlock(folio);
1448 			folio_put(folio);
1449 			ret++;
1450 		}
1451 		if (error == -ENOMEM)
1452 			break;
1453 		error = 0;
1454 	}
1455 	return error ? error : ret;
1456 }
1457 
1458 /*
1459  * If swap found in inode, free it and move page from swapcache to filecache.
1460  */
1461 static int shmem_unuse_inode(struct inode *inode, unsigned int type)
1462 {
1463 	struct address_space *mapping = inode->i_mapping;
1464 	pgoff_t start = 0;
1465 	struct folio_batch fbatch;
1466 	pgoff_t indices[PAGEVEC_SIZE];
1467 	int ret = 0;
1468 
1469 	do {
1470 		folio_batch_init(&fbatch);
1471 		if (!shmem_find_swap_entries(mapping, start, &fbatch,
1472 					     indices, type)) {
1473 			ret = 0;
1474 			break;
1475 		}
1476 
1477 		ret = shmem_unuse_swap_entries(inode, &fbatch, indices);
1478 		if (ret < 0)
1479 			break;
1480 
1481 		start = indices[folio_batch_count(&fbatch) - 1];
1482 	} while (true);
1483 
1484 	return ret;
1485 }
1486 
1487 /*
1488  * Read all the shared memory data that resides in the swap
1489  * device 'type' back into memory, so the swap device can be
1490  * unused.
1491  */
1492 int shmem_unuse(unsigned int type)
1493 {
1494 	struct shmem_inode_info *info, *next;
1495 	int error = 0;
1496 
1497 	if (list_empty(&shmem_swaplist))
1498 		return 0;
1499 
1500 	spin_lock(&shmem_swaplist_lock);
1501 start_over:
1502 	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1503 		if (!info->swapped) {
1504 			list_del_init(&info->swaplist);
1505 			continue;
1506 		}
1507 		/*
1508 		 * Drop the swaplist mutex while searching the inode for swap;
1509 		 * but before doing so, make sure shmem_evict_inode() will not
1510 		 * remove placeholder inode from swaplist, nor let it be freed
1511 		 * (igrab() would protect from unlink, but not from unmount).
1512 		 */
1513 		atomic_inc(&info->stop_eviction);
1514 		spin_unlock(&shmem_swaplist_lock);
1515 
1516 		error = shmem_unuse_inode(&info->vfs_inode, type);
1517 		cond_resched();
1518 
1519 		spin_lock(&shmem_swaplist_lock);
1520 		if (atomic_dec_and_test(&info->stop_eviction))
1521 			wake_up_var(&info->stop_eviction);
1522 		if (error)
1523 			break;
1524 		if (list_empty(&info->swaplist))
1525 			goto start_over;
1526 		next = list_next_entry(info, swaplist);
1527 		if (!info->swapped)
1528 			list_del_init(&info->swaplist);
1529 	}
1530 	spin_unlock(&shmem_swaplist_lock);
1531 
1532 	return error;
1533 }
1534 
1535 /**
1536  * shmem_writeout - Write the folio to swap
1537  * @folio: The folio to write
1538  * @plug: swap plug
1539  * @folio_list: list to put back folios on split
1540  *
1541  * Move the folio from the page cache to the swap cache.
1542  */
1543 int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
1544 		struct list_head *folio_list)
1545 {
1546 	struct address_space *mapping = folio->mapping;
1547 	struct inode *inode = mapping->host;
1548 	struct shmem_inode_info *info = SHMEM_I(inode);
1549 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1550 	pgoff_t index;
1551 	int nr_pages;
1552 	bool split = false;
1553 
1554 	if ((info->flags & VM_LOCKED) || sbinfo->noswap)
1555 		goto redirty;
1556 
1557 	if (!total_swap_pages)
1558 		goto redirty;
1559 
1560 	/*
1561 	 * If CONFIG_THP_SWAP is not enabled, the large folio should be
1562 	 * split when swapping.
1563 	 *
1564 	 * And shrinkage of pages beyond i_size does not split swap, so
1565 	 * swapout of a large folio crossing i_size needs to split too
1566 	 * (unless fallocate has been used to preallocate beyond EOF).
1567 	 */
1568 	if (folio_test_large(folio)) {
1569 		index = shmem_fallocend(inode,
1570 			DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
1571 		if ((index > folio->index && index < folio_next_index(folio)) ||
1572 		    !IS_ENABLED(CONFIG_THP_SWAP))
1573 			split = true;
1574 	}
1575 
1576 	if (split) {
1577 try_split:
1578 		/* Ensure the subpages are still dirty */
1579 		folio_test_set_dirty(folio);
1580 		if (split_folio_to_list(folio, folio_list))
1581 			goto redirty;
1582 		folio_clear_dirty(folio);
1583 	}
1584 
1585 	index = folio->index;
1586 	nr_pages = folio_nr_pages(folio);
1587 
1588 	/*
1589 	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1590 	 * value into swapfile.c, the only way we can correctly account for a
1591 	 * fallocated folio arriving here is now to initialize it and write it.
1592 	 *
1593 	 * That's okay for a folio already fallocated earlier, but if we have
1594 	 * not yet completed the fallocation, then (a) we want to keep track
1595 	 * of this folio in case we have to undo it, and (b) it may not be a
1596 	 * good idea to continue anyway, once we're pushing into swap.  So
1597 	 * reactivate the folio, and let shmem_fallocate() quit when too many.
1598 	 */
1599 	if (!folio_test_uptodate(folio)) {
1600 		if (inode->i_private) {
1601 			struct shmem_falloc *shmem_falloc;
1602 			spin_lock(&inode->i_lock);
1603 			shmem_falloc = inode->i_private;
1604 			if (shmem_falloc &&
1605 			    !shmem_falloc->waitq &&
1606 			    index >= shmem_falloc->start &&
1607 			    index < shmem_falloc->next)
1608 				shmem_falloc->nr_unswapped += nr_pages;
1609 			else
1610 				shmem_falloc = NULL;
1611 			spin_unlock(&inode->i_lock);
1612 			if (shmem_falloc)
1613 				goto redirty;
1614 		}
1615 		folio_zero_range(folio, 0, folio_size(folio));
1616 		flush_dcache_folio(folio);
1617 		folio_mark_uptodate(folio);
1618 	}
1619 
1620 	if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
1621 		bool first_swapped = shmem_recalc_inode(inode, 0, nr_pages);
1622 		int error;
1623 
1624 		/*
1625 		 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1626 		 * if it's not already there.  Do it now before the folio is
1627 		 * removed from page cache, when its pagelock no longer
1628 		 * protects the inode from eviction.  And do it now, after
1629 		 * we've incremented swapped, because shmem_unuse() will
1630 		 * prune a !swapped inode from the swaplist.
1631 		 */
1632 		if (first_swapped) {
1633 			spin_lock(&shmem_swaplist_lock);
1634 			if (list_empty(&info->swaplist))
1635 				list_add(&info->swaplist, &shmem_swaplist);
1636 			spin_unlock(&shmem_swaplist_lock);
1637 		}
1638 
1639 		swap_shmem_alloc(folio->swap, nr_pages);
1640 		shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
1641 
1642 		BUG_ON(folio_mapped(folio));
1643 		error = swap_writeout(folio, plug);
1644 		if (error != AOP_WRITEPAGE_ACTIVATE) {
1645 			/* folio has been unlocked */
1646 			return error;
1647 		}
1648 
1649 		/*
1650 		 * The intention here is to avoid holding on to the swap when
1651 		 * zswap was unable to compress and unable to writeback; but
1652 		 * it will be appropriate if other reactivate cases are added.
1653 		 */
1654 		error = shmem_add_to_page_cache(folio, mapping, index,
1655 				swp_to_radix_entry(folio->swap),
1656 				__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
1657 		/* Swap entry might be erased by racing shmem_free_swap() */
1658 		if (!error) {
1659 			shmem_recalc_inode(inode, 0, -nr_pages);
1660 			swap_free_nr(folio->swap, nr_pages);
1661 		}
1662 
1663 		/*
1664 		 * The swap_cache_del_folio() below could be left for
1665 		 * shrink_folio_list()'s folio_free_swap() to dispose of;
1666 		 * but I'm a little nervous about letting this folio out of
1667 		 * shmem_writeout() in a hybrid half-tmpfs-half-swap state
1668 		 * e.g. folio_mapping(folio) might give an unexpected answer.
1669 		 */
1670 		swap_cache_del_folio(folio);
1671 		goto redirty;
1672 	}
1673 	if (nr_pages > 1)
1674 		goto try_split;
1675 redirty:
1676 	folio_mark_dirty(folio);
1677 	return AOP_WRITEPAGE_ACTIVATE;	/* Return with folio locked */
1678 }
1679 EXPORT_SYMBOL_GPL(shmem_writeout);
1680 
1681 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1682 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1683 {
1684 	char buffer[64];
1685 
1686 	if (!mpol || mpol->mode == MPOL_DEFAULT)
1687 		return;		/* show nothing */
1688 
1689 	mpol_to_str(buffer, sizeof(buffer), mpol);
1690 
1691 	seq_printf(seq, ",mpol=%s", buffer);
1692 }
1693 
1694 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1695 {
1696 	struct mempolicy *mpol = NULL;
1697 	if (sbinfo->mpol) {
1698 		raw_spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
1699 		mpol = sbinfo->mpol;
1700 		mpol_get(mpol);
1701 		raw_spin_unlock(&sbinfo->stat_lock);
1702 	}
1703 	return mpol;
1704 }
1705 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1706 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1707 {
1708 }
1709 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1710 {
1711 	return NULL;
1712 }
1713 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1714 
1715 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
1716 			pgoff_t index, unsigned int order, pgoff_t *ilx);
1717 
1718 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
1719 			struct shmem_inode_info *info, pgoff_t index)
1720 {
1721 	struct mempolicy *mpol;
1722 	pgoff_t ilx;
1723 	struct folio *folio;
1724 
1725 	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
1726 	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
1727 	mpol_cond_put(mpol);
1728 
1729 	return folio;
1730 }
1731 
1732 /*
1733  * Make sure huge_gfp is always more limited than limit_gfp.
1734  * Some of the flags set permissions, while others set limitations.
1735  */
1736 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
1737 {
1738 	gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
1739 	gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
1740 	gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
1741 	gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
1742 
1743 	/* Allow allocations only from the originally specified zones. */
1744 	result |= zoneflags;
1745 
1746 	/*
1747 	 * Minimize the result gfp by taking the union with the deny flags,
1748 	 * and the intersection of the allow flags.
1749 	 */
1750 	result |= (limit_gfp & denyflags);
1751 	result |= (huge_gfp & limit_gfp) & allowflags;
1752 
1753 	return result;
1754 }
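
/*
 * Editorial sketch (not part of the original source): a worked example of
 * the combination above, under the assumption of typical flag values.  If
 * huge_gfp requests __GFP_DIRECT_RECLAIM while limit_gfp is GFP_NOWAIT
 * (kswapd reclaim only), direct reclaim is dropped from the result because
 * the allow flags are intersected; any __GFP_NORETRY or __GFP_NOWARN in
 * limit_gfp is carried over because the deny flags are unioned; and the
 * zone bits (GFP_ZONEMASK) always come from limit_gfp.
 */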
1755 
1756 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1757 bool shmem_hpage_pmd_enabled(void)
1758 {
1759 	if (shmem_huge == SHMEM_HUGE_DENY)
1760 		return false;
1761 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
1762 		return true;
1763 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
1764 		return true;
1765 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
1766 		return true;
1767 	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
1768 	    shmem_huge != SHMEM_HUGE_NEVER)
1769 		return true;
1770 
1771 	return false;
1772 }
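
/*
 * Editorial note (not part of the original source): the huge_shmem_orders_*
 * bitmaps consulted above are understood to be driven by the per-size sysfs
 * knobs, e.g. /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/shmem_enabled
 * set to "always", "within_size", "advise" or "inherit", while shmem_huge
 * reflects the global shmem_enabled setting ("deny"/"force" included).
 */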
1773 
1774 unsigned long shmem_allowable_huge_orders(struct inode *inode,
1775 				struct vm_area_struct *vma, pgoff_t index,
1776 				loff_t write_end, bool shmem_huge_force)
1777 {
1778 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
1779 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
1780 	vm_flags_t vm_flags = vma ? vma->vm_flags : 0;
1781 	unsigned int global_orders;
1782 
1783 	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force)))
1784 		return 0;
1785 
1786 	global_orders = shmem_huge_global_enabled(inode, index, write_end,
1787 						  shmem_huge_force, vma, vm_flags);
1788 	/* Tmpfs huge pages allocation */
1789 	if (!vma || !vma_is_anon_shmem(vma))
1790 		return global_orders;
1791 
1792 	/*
1793 	 * Following the 'deny' semantics of the top level, force the huge
1794 	 * option off from all mounts.
1795 	 */
1796 	if (shmem_huge == SHMEM_HUGE_DENY)
1797 		return 0;
1798 
1799 	/*
1800 	 * Only allow inherit orders if the top-level value is 'force', which
1801 	 * means non-PMD sized THP cannot override the 'huge' mount option now.
1802 	 */
1803 	if (shmem_huge == SHMEM_HUGE_FORCE)
1804 		return READ_ONCE(huge_shmem_orders_inherit);
1805 
1806 	/* Allow mTHP that will be fully within i_size. */
1807 	mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
1808 
1809 	if (vm_flags & VM_HUGEPAGE)
1810 		mask |= READ_ONCE(huge_shmem_orders_madvise);
1811 
1812 	if (global_orders > 0)
1813 		mask |= READ_ONCE(huge_shmem_orders_inherit);
1814 
1815 	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
1816 }
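
/*
 * Editorial sketch (not part of the original source): the value returned
 * above is a bitmask of allowed folio orders, not a single order.  For
 * example, a mask of 0x208 ((1 << 3) | (1 << 9)) would permit order-3
 * (32KB with 4KB pages) and PMD-order-9 (2MB) folios; callers such as
 * shmem_suitable_orders() and shmem_alloc_and_add_folio() then walk the
 * mask from the highest set bit downwards.
 */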
1817 
1818 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1819 					   struct address_space *mapping, pgoff_t index,
1820 					   unsigned long orders)
1821 {
1822 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
1823 	pgoff_t aligned_index;
1824 	unsigned long pages;
1825 	int order;
1826 
1827 	if (vma) {
1828 		orders = thp_vma_suitable_orders(vma, vmf->address, orders);
1829 		if (!orders)
1830 			return 0;
1831 	}
1832 
1833 	/* Find the highest order that can add into the page cache */
1834 	order = highest_order(orders);
1835 	while (orders) {
1836 		pages = 1UL << order;
1837 		aligned_index = round_down(index, pages);
1838 		/*
1839 		 * Check for conflict before waiting on a huge allocation.
1840 		 * Conflict might be that a huge page has just been allocated
1841 		 * and added to page cache by a racing thread, or that there
1842 		 * is already at least one small page in the huge extent.
1843 		 * Be careful to retry when appropriate, but not forever!
1844 		 * Elsewhere -EEXIST would be the right code, but not here.
1845 		 */
1846 		if (!xa_find(&mapping->i_pages, &aligned_index,
1847 			     aligned_index + pages - 1, XA_PRESENT))
1848 			break;
1849 		order = next_order(&orders, order);
1850 	}
1851 
1852 	return orders;
1853 }
1854 #else
1855 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
1856 					   struct address_space *mapping, pgoff_t index,
1857 					   unsigned long orders)
1858 {
1859 	return 0;
1860 }
1861 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1862 
1863 static struct folio *shmem_alloc_folio(gfp_t gfp, int order,
1864 		struct shmem_inode_info *info, pgoff_t index)
1865 {
1866 	struct mempolicy *mpol;
1867 	pgoff_t ilx;
1868 	struct folio *folio;
1869 
1870 	mpol = shmem_get_pgoff_policy(info, index, order, &ilx);
1871 	folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id());
1872 	mpol_cond_put(mpol);
1873 
1874 	return folio;
1875 }
1876 
1877 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
1878 		gfp_t gfp, struct inode *inode, pgoff_t index,
1879 		struct mm_struct *fault_mm, unsigned long orders)
1880 {
1881 	struct address_space *mapping = inode->i_mapping;
1882 	struct shmem_inode_info *info = SHMEM_I(inode);
1883 	unsigned long suitable_orders = 0;
1884 	struct folio *folio = NULL;
1885 	pgoff_t aligned_index;
1886 	long pages;
1887 	int error, order;
1888 
1889 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1890 		orders = 0;
1891 
1892 	if (orders > 0) {
1893 		suitable_orders = shmem_suitable_orders(inode, vmf,
1894 							mapping, index, orders);
1895 
1896 		order = highest_order(suitable_orders);
1897 		while (suitable_orders) {
1898 			pages = 1UL << order;
1899 			aligned_index = round_down(index, pages);
1900 			folio = shmem_alloc_folio(gfp, order, info, aligned_index);
1901 			if (folio) {
1902 				index = aligned_index;
1903 				goto allocated;
1904 			}
1905 
1906 			if (pages == HPAGE_PMD_NR)
1907 				count_vm_event(THP_FILE_FALLBACK);
1908 			count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
1909 			order = next_order(&suitable_orders, order);
1910 		}
1911 	} else {
1912 		pages = 1;
1913 		folio = shmem_alloc_folio(gfp, 0, info, index);
1914 	}
1915 	if (!folio)
1916 		return ERR_PTR(-ENOMEM);
1917 
1918 allocated:
1919 	__folio_set_locked(folio);
1920 	__folio_set_swapbacked(folio);
1921 
1922 	gfp &= GFP_RECLAIM_MASK;
1923 	error = mem_cgroup_charge(folio, fault_mm, gfp);
1924 	if (error) {
1925 		if (xa_find(&mapping->i_pages, &index,
1926 				index + pages - 1, XA_PRESENT)) {
1927 			error = -EEXIST;
1928 		} else if (pages > 1) {
1929 			if (pages == HPAGE_PMD_NR) {
1930 				count_vm_event(THP_FILE_FALLBACK);
1931 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
1932 			}
1933 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
1934 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
1935 		}
1936 		goto unlock;
1937 	}
1938 
1939 	error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
1940 	if (error)
1941 		goto unlock;
1942 
1943 	error = shmem_inode_acct_blocks(inode, pages);
1944 	if (error) {
1945 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1946 		long freed;
1947 		/*
1948 		 * Try to reclaim some space by splitting a few
1949 		 * large folios beyond i_size on the filesystem.
1950 		 */
1951 		shmem_unused_huge_shrink(sbinfo, NULL, pages);
1952 		/*
1953 		 * And do a shmem_recalc_inode() to account for freed pages:
1954 		 * except our folio is there in cache, so not quite balanced.
1955 		 */
1956 		spin_lock(&info->lock);
1957 		freed = pages + info->alloced - info->swapped -
1958 			READ_ONCE(mapping->nrpages);
1959 		if (freed > 0)
1960 			info->alloced -= freed;
1961 		spin_unlock(&info->lock);
1962 		if (freed > 0)
1963 			shmem_inode_unacct_blocks(inode, freed);
1964 		error = shmem_inode_acct_blocks(inode, pages);
1965 		if (error) {
1966 			filemap_remove_folio(folio);
1967 			goto unlock;
1968 		}
1969 	}
1970 
1971 	shmem_recalc_inode(inode, pages, 0);
1972 	folio_add_lru(folio);
1973 	return folio;
1974 
1975 unlock:
1976 	folio_unlock(folio);
1977 	folio_put(folio);
1978 	return ERR_PTR(error);
1979 }
1980 
1981 static struct folio *shmem_swap_alloc_folio(struct inode *inode,
1982 		struct vm_area_struct *vma, pgoff_t index,
1983 		swp_entry_t entry, int order, gfp_t gfp)
1984 {
1985 	struct shmem_inode_info *info = SHMEM_I(inode);
1986 	int nr_pages = 1 << order;
1987 	struct folio *new;
1988 	gfp_t alloc_gfp;
1989 	void *shadow;
1990 
1991 	/*
1992 	 * We have arrived here because our zones are constrained, so don't
1993 	 * limit chance of success with further cpuset and node constraints.
1994 	 */
1995 	gfp &= ~GFP_CONSTRAINT_MASK;
1996 	alloc_gfp = gfp;
1997 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1998 		if (WARN_ON_ONCE(order))
1999 			return ERR_PTR(-EINVAL);
2000 	} else if (order) {
2001 		/*
2002 		 * If uffd is active for the vma, we need per-page fault
2003 		 * fidelity to maintain the uffd semantics, so fall back to
2004 		 * swapping in an order-0 folio; the same applies for the
2005 		 * zswap case. Any existing sub-folio in the swap cache also
2006 		 * blocks mTHP swapin.
2007 		 */
2008 		if ((vma && unlikely(userfaultfd_armed(vma))) ||
2009 		     !zswap_never_enabled() ||
2010 		     non_swapcache_batch(entry, nr_pages) != nr_pages)
2011 			goto fallback;
2012 
2013 		alloc_gfp = limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
2014 	}
2015 retry:
2016 	new = shmem_alloc_folio(alloc_gfp, order, info, index);
2017 	if (!new) {
2018 		new = ERR_PTR(-ENOMEM);
2019 		goto fallback;
2020 	}
2021 
2022 	if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
2023 					   alloc_gfp, entry)) {
2024 		folio_put(new);
2025 		new = ERR_PTR(-ENOMEM);
2026 		goto fallback;
2027 	}
2028 
2029 	/*
2030 	 * Prevent parallel swapin from proceeding with the swap cache flag.
2031 	 *
2032 	 * Of course there is another possible concurrent scenario as well,
2033 	 * that is to say, the swap cache flag of a large folio has already
2034 	 * been set by swapcache_prepare(), while another thread may have
2035 	 * already split the large swap entry stored in the shmem mapping.
2036 	 * In this case, shmem_add_to_page_cache() will help identify the
2037 	 * concurrent swapin and return -EEXIST.
2038 	 */
2039 	if (swapcache_prepare(entry, nr_pages)) {
2040 		folio_put(new);
2041 		new = ERR_PTR(-EEXIST);
2042 		/* Try smaller folio to avoid cache conflict */
2043 		goto fallback;
2044 	}
2045 
2046 	__folio_set_locked(new);
2047 	__folio_set_swapbacked(new);
2048 	new->swap = entry;
2049 
2050 	memcg1_swapin(entry, nr_pages);
2051 	shadow = swap_cache_get_shadow(entry);
2052 	if (shadow)
2053 		workingset_refault(new, shadow);
2054 	folio_add_lru(new);
2055 	swap_read_folio(new, NULL);
2056 	return new;
2057 fallback:
2058 	/* Order 0 swapin failed, nothing to fall back to, abort */
2059 	if (!order)
2060 		return new;
2061 	entry.val += index - round_down(index, nr_pages);
2062 	alloc_gfp = gfp;
2063 	nr_pages = 1;
2064 	order = 0;
2065 	goto retry;
2066 }
2067 
2068 /*
2069  * When a page is moved from swapcache to shmem filecache (either by the
2070  * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
2071  * shmem_unuse_inode()), it may have been read in earlier from swap, in
2072  * ignorance of the mapping it belongs to.  If that mapping has special
2073  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
2074  * we may need to copy to a suitable page before moving to filecache.
2075  *
2076  * In a future release, this may well be extended to respect cpuset and
2077  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
2078  * but for now it is a simple matter of zone.
2079  */
2080 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
2081 {
2082 	return folio_zonenum(folio) > gfp_zone(gfp);
2083 }
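
/*
 * Editorial sketch (not part of the original source): as an illustration,
 * if the mapping's gfp mask restricts allocations to ZONE_DMA32 (the gma500
 * case mentioned above) and the folio read in from swap happens to sit in
 * ZONE_NORMAL above 4GB, folio_zonenum() exceeds gfp_zone() and the data is
 * copied into a suitably placed folio by shmem_replace_folio() before being
 * added back as shmem page cache.
 */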
2084 
2085 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
2086 				struct shmem_inode_info *info, pgoff_t index,
2087 				struct vm_area_struct *vma)
2088 {
2089 	struct swap_cluster_info *ci;
2090 	struct folio *new, *old = *foliop;
2091 	swp_entry_t entry = old->swap;
2092 	int nr_pages = folio_nr_pages(old);
2093 	int error = 0;
2094 
2095 	/*
2096 	 * We have arrived here because our zones are constrained, so don't
2097 	 * limit chance of success by further cpuset and node constraints.
2098 	 */
2099 	gfp &= ~GFP_CONSTRAINT_MASK;
2100 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2101 	if (nr_pages > 1) {
2102 		gfp_t huge_gfp = vma_thp_gfp_mask(vma);
2103 
2104 		gfp = limit_gfp_mask(huge_gfp, gfp);
2105 	}
2106 #endif
2107 
2108 	new = shmem_alloc_folio(gfp, folio_order(old), info, index);
2109 	if (!new)
2110 		return -ENOMEM;
2111 
2112 	folio_ref_add(new, nr_pages);
2113 	folio_copy(new, old);
2114 	flush_dcache_folio(new);
2115 
2116 	__folio_set_locked(new);
2117 	__folio_set_swapbacked(new);
2118 	folio_mark_uptodate(new);
2119 	new->swap = entry;
2120 	folio_set_swapcache(new);
2121 
2122 	ci = swap_cluster_get_and_lock_irq(old);
2123 	__swap_cache_replace_folio(ci, old, new);
2124 	mem_cgroup_replace_folio(old, new);
2125 	shmem_update_stats(new, nr_pages);
2126 	shmem_update_stats(old, -nr_pages);
2127 	swap_cluster_unlock_irq(ci);
2128 
2129 	folio_add_lru(new);
2130 	*foliop = new;
2131 
2132 	folio_clear_swapcache(old);
2133 	old->private = NULL;
2134 
2135 	folio_unlock(old);
2136 	/*
2137 	 * The old folio has been removed from the swap cache: drop its
2138 	 * 'nr_pages' references, as well as the one temporary reference
2139 	 * obtained from the swap cache lookup.
2140 	 */
2141 	folio_put_refs(old, nr_pages + 1);
2142 	return error;
2143 }
2144 
2145 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
2146 					 struct folio *folio, swp_entry_t swap,
2147 					 bool skip_swapcache)
2148 {
2149 	struct address_space *mapping = inode->i_mapping;
2150 	swp_entry_t swapin_error;
2151 	void *old;
2152 	int nr_pages;
2153 
2154 	swapin_error = make_poisoned_swp_entry();
2155 	old = xa_cmpxchg_irq(&mapping->i_pages, index,
2156 			     swp_to_radix_entry(swap),
2157 			     swp_to_radix_entry(swapin_error), 0);
2158 	if (old != swp_to_radix_entry(swap))
2159 		return;
2160 
2161 	nr_pages = folio_nr_pages(folio);
2162 	folio_wait_writeback(folio);
2163 	if (!skip_swapcache)
2164 		swap_cache_del_folio(folio);
2165 	/*
2166 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
2167 	 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
2168 	 * in shmem_evict_inode().
2169 	 */
2170 	shmem_recalc_inode(inode, -nr_pages, -nr_pages);
2171 	swap_free_nr(swap, nr_pages);
2172 }
2173 
2174 static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
2175 				   swp_entry_t swap, gfp_t gfp)
2176 {
2177 	struct address_space *mapping = inode->i_mapping;
2178 	XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
2179 	int split_order = 0;
2180 	int i;
2181 
2182 	/* Convert user data gfp flags to xarray node gfp flags */
2183 	gfp &= GFP_RECLAIM_MASK;
2184 
2185 	for (;;) {
2186 		void *old = NULL;
2187 		int cur_order;
2188 		pgoff_t swap_index;
2189 
2190 		xas_lock_irq(&xas);
2191 		old = xas_load(&xas);
2192 		if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) {
2193 			xas_set_err(&xas, -EEXIST);
2194 			goto unlock;
2195 		}
2196 
2197 		cur_order = xas_get_order(&xas);
2198 		if (!cur_order)
2199 			goto unlock;
2200 
2201 		/* Try to split large swap entry in pagecache */
2202 		swap_index = round_down(index, 1 << cur_order);
2203 		split_order = xas_try_split_min_order(cur_order);
2204 
2205 		while (cur_order > 0) {
2206 			pgoff_t aligned_index =
2207 				round_down(index, 1 << cur_order);
2208 			pgoff_t swap_offset = aligned_index - swap_index;
2209 
2210 			xas_set_order(&xas, index, split_order);
2211 			xas_try_split(&xas, old, cur_order);
2212 			if (xas_error(&xas))
2213 				goto unlock;
2214 
2215 			/*
2216 			 * Re-set the swap entries after splitting; the swap offsets
2217 			 * within the original large entry are continuous.
2218 			 */
2219 			for (i = 0; i < 1 << cur_order;
2220 			     i += (1 << split_order)) {
2221 				swp_entry_t tmp;
2222 
2223 				tmp = swp_entry(swp_type(swap),
2224 						swp_offset(swap) + swap_offset +
2225 							i);
2226 				__xa_store(&mapping->i_pages, aligned_index + i,
2227 					   swp_to_radix_entry(tmp), 0);
2228 			}
2229 			cur_order = split_order;
2230 			split_order = xas_try_split_min_order(split_order);
2231 		}
2232 
2233 unlock:
2234 		xas_unlock_irq(&xas);
2235 
2236 		if (!xas_nomem(&xas, gfp))
2237 			break;
2238 	}
2239 
2240 	if (xas_error(&xas))
2241 		return xas_error(&xas);
2242 
2243 	return 0;
2244 }
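
/*
 * Editorial sketch (not part of the original source): with the default
 * XA_CHUNK_SHIFT of 6, a PMD-order (order-9) swap entry would be expected
 * to split in two passes of the loop above, first into order-6 entries and
 * then, around the target index, into order-0 entries, each re-stored with
 * a swap offset computed from its distance into the original large entry.
 */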
2245 
2246 /*
2247  * Swap in the folio pointed to by *foliop.
2248  * Caller has to make sure that *foliop contains a valid swapped folio.
2249  * Returns 0 and the folio in *foliop on success. On failure, returns the
2250  * error code and NULL in *foliop.
2251  */
2252 static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
2253 			     struct folio **foliop, enum sgp_type sgp,
2254 			     gfp_t gfp, struct vm_area_struct *vma,
2255 			     vm_fault_t *fault_type)
2256 {
2257 	struct address_space *mapping = inode->i_mapping;
2258 	struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
2259 	struct shmem_inode_info *info = SHMEM_I(inode);
2260 	swp_entry_t swap, index_entry;
2261 	struct swap_info_struct *si;
2262 	struct folio *folio = NULL;
2263 	bool skip_swapcache = false;
2264 	int error, nr_pages, order;
2265 	pgoff_t offset;
2266 
2267 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
2268 	index_entry = radix_to_swp_entry(*foliop);
2269 	swap = index_entry;
2270 	*foliop = NULL;
2271 
2272 	if (is_poisoned_swp_entry(index_entry))
2273 		return -EIO;
2274 
2275 	si = get_swap_device(index_entry);
2276 	order = shmem_confirm_swap(mapping, index, index_entry);
2277 	if (unlikely(!si)) {
2278 		if (order < 0)
2279 			return -EEXIST;
2280 		else
2281 			return -EINVAL;
2282 	}
2283 	if (unlikely(order < 0)) {
2284 		put_swap_device(si);
2285 		return -EEXIST;
2286 	}
2287 
2288 	/* index may point to the middle of a large entry, get the sub entry */
2289 	if (order) {
2290 		offset = index - round_down(index, 1 << order);
2291 		swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
2292 	}
2293 
2294 	/* Look it up and read it in.. */
2295 	folio = swap_cache_get_folio(swap);
2296 	if (!folio) {
2297 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
2298 			/* Direct swapin skipping swap cache & readahead */
2299 			folio = shmem_swap_alloc_folio(inode, vma, index,
2300 						       index_entry, order, gfp);
2301 			if (IS_ERR(folio)) {
2302 				error = PTR_ERR(folio);
2303 				folio = NULL;
2304 				goto failed;
2305 			}
2306 			skip_swapcache = true;
2307 		} else {
2308 			/* Cached swapin only supports order 0 folio */
2309 			folio = shmem_swapin_cluster(swap, gfp, info, index);
2310 			if (!folio) {
2311 				error = -ENOMEM;
2312 				goto failed;
2313 			}
2314 		}
2315 		if (fault_type) {
2316 			*fault_type |= VM_FAULT_MAJOR;
2317 			count_vm_event(PGMAJFAULT);
2318 			count_memcg_event_mm(fault_mm, PGMAJFAULT);
2319 		}
2320 	} else {
2321 		swap_update_readahead(folio, NULL, 0);
2322 	}
2323 
2324 	if (order > folio_order(folio)) {
2325 		/*
2326 		 * Swapin may get smaller folios due to various reasons:
2327 		 * It may fall back to order 0 due to memory pressure or a race,
2328 		 * and swap readahead may swap order 0 folios into the swapcache
2329 		 * asynchronously, while the shmem mapping can still store
2330 		 * large swap entries. In such cases, we should split the
2331 		 * large swap entry to prevent possible data corruption.
2332 		 */
2333 		error = shmem_split_large_entry(inode, index, index_entry, gfp);
2334 		if (error)
2335 			goto failed_nolock;
2336 	}
2337 
2338 	/*
2339 	 * If the folio is large, round down swap and index by folio size.
2340 	 * No matter what race occurs, the swap layer ensures we either get
2341 	 * a valid folio that has its swap entry aligned by size, or a
2342 	 * temporarily invalid one which we'll abort very soon and retry.
2343 	 *
2344 	 * shmem_add_to_page_cache ensures the whole range contains expected
2345 	 * entries and prevents any corruption, so any race split is fine
2346 	 * too, it will succeed as long as the entries are still there.
2347 	 */
2348 	nr_pages = folio_nr_pages(folio);
2349 	if (nr_pages > 1) {
2350 		swap.val = round_down(swap.val, nr_pages);
2351 		index = round_down(index, nr_pages);
2352 	}
2353 
2354 	/*
2355 	 * We have to do this with the folio locked to prevent races.
2356 	 * The shmem_confirm_swap below only checks if the first swap
2357 	 * entry matches the folio, that's enough to ensure the folio
2358 	 * is not used outside of shmem, as shmem swap entries
2359 	 * and swap cache folios are never partially freed.
2360 	 */
2361 	folio_lock(folio);
2362 	if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
2363 	    shmem_confirm_swap(mapping, index, swap) < 0 ||
2364 	    folio->swap.val != swap.val) {
2365 		error = -EEXIST;
2366 		goto unlock;
2367 	}
2368 	if (!folio_test_uptodate(folio)) {
2369 		error = -EIO;
2370 		goto failed;
2371 	}
2372 	folio_wait_writeback(folio);
2373 
2374 	/*
2375 	 * Some architectures may have to restore extra metadata to the
2376 	 * folio after reading from swap.
2377 	 */
2378 	arch_swap_restore(folio_swap(swap, folio), folio);
2379 
2380 	if (shmem_should_replace_folio(folio, gfp)) {
2381 		error = shmem_replace_folio(&folio, gfp, info, index, vma);
2382 		if (error)
2383 			goto failed;
2384 	}
2385 
2386 	error = shmem_add_to_page_cache(folio, mapping, index,
2387 					swp_to_radix_entry(swap), gfp);
2388 	if (error)
2389 		goto failed;
2390 
2391 	shmem_recalc_inode(inode, 0, -nr_pages);
2392 
2393 	if (sgp == SGP_WRITE)
2394 		folio_mark_accessed(folio);
2395 
2396 	if (skip_swapcache) {
2397 		folio->swap.val = 0;
2398 		swapcache_clear(si, swap, nr_pages);
2399 	} else {
2400 		swap_cache_del_folio(folio);
2401 	}
2402 	folio_mark_dirty(folio);
2403 	swap_free_nr(swap, nr_pages);
2404 	put_swap_device(si);
2405 
2406 	*foliop = folio;
2407 	return 0;
2408 failed:
2409 	if (shmem_confirm_swap(mapping, index, swap) < 0)
2410 		error = -EEXIST;
2411 	if (error == -EIO)
2412 		shmem_set_folio_swapin_error(inode, index, folio, swap,
2413 					     skip_swapcache);
2414 unlock:
2415 	if (folio)
2416 		folio_unlock(folio);
2417 failed_nolock:
2418 	if (skip_swapcache)
2419 		swapcache_clear(si, folio->swap, folio_nr_pages(folio));
2420 	if (folio)
2421 		folio_put(folio);
2422 	put_swap_device(si);
2423 
2424 	return error;
2425 }
2426 
2427 /*
2428  * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
2429  *
2430  * If we allocate a new one we do not mark it dirty. That's up to the
2431  * vm. If we swap it in we mark it dirty since we also free the swap
2432  * entry since a page cannot live in both the swap and page cache.
2433  *
2434  * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
2435  */
2436 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
2437 		loff_t write_end, struct folio **foliop, enum sgp_type sgp,
2438 		gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)
2439 {
2440 	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
2441 	struct mm_struct *fault_mm;
2442 	struct folio *folio;
2443 	int error;
2444 	bool alloced;
2445 	unsigned long orders = 0;
2446 
2447 	if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping)))
2448 		return -EINVAL;
2449 
2450 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
2451 		return -EFBIG;
2452 repeat:
2453 	if (sgp <= SGP_CACHE &&
2454 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode))
2455 		return -EINVAL;
2456 
2457 	alloced = false;
2458 	fault_mm = vma ? vma->vm_mm : NULL;
2459 
2460 	folio = filemap_get_entry(inode->i_mapping, index);
2461 	if (folio && vma && userfaultfd_minor(vma)) {
2462 		if (!xa_is_value(folio))
2463 			folio_put(folio);
2464 		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
2465 		return 0;
2466 	}
2467 
2468 	if (xa_is_value(folio)) {
2469 		error = shmem_swapin_folio(inode, index, &folio,
2470 					   sgp, gfp, vma, fault_type);
2471 		if (error == -EEXIST)
2472 			goto repeat;
2473 
2474 		*foliop = folio;
2475 		return error;
2476 	}
2477 
2478 	if (folio) {
2479 		folio_lock(folio);
2480 
2481 		/* Has the folio been truncated or swapped out? */
2482 		if (unlikely(folio->mapping != inode->i_mapping)) {
2483 			folio_unlock(folio);
2484 			folio_put(folio);
2485 			goto repeat;
2486 		}
2487 		if (sgp == SGP_WRITE)
2488 			folio_mark_accessed(folio);
2489 		if (folio_test_uptodate(folio))
2490 			goto out;
2491 		/* fallocated folio */
2492 		if (sgp != SGP_READ)
2493 			goto clear;
2494 		folio_unlock(folio);
2495 		folio_put(folio);
2496 	}
2497 
2498 	/*
2499 	 * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
2500 	 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
2501 	 */
2502 	*foliop = NULL;
2503 	if (sgp == SGP_READ)
2504 		return 0;
2505 	if (sgp == SGP_NOALLOC)
2506 		return -ENOENT;
2507 
2508 	/*
2509 	 * Fast cache lookup and swap lookup did not find it: allocate.
2510 	 */
2511 
2512 	if (vma && userfaultfd_missing(vma)) {
2513 		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
2514 		return 0;
2515 	}
2516 
2517 	/* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */
2518 	orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false);
2519 	if (orders > 0) {
2520 		gfp_t huge_gfp;
2521 
2522 		huge_gfp = vma_thp_gfp_mask(vma);
2523 		huge_gfp = limit_gfp_mask(huge_gfp, gfp);
2524 		folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
2525 				inode, index, fault_mm, orders);
2526 		if (!IS_ERR(folio)) {
2527 			if (folio_test_pmd_mappable(folio))
2528 				count_vm_event(THP_FILE_ALLOC);
2529 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
2530 			goto alloced;
2531 		}
2532 		if (PTR_ERR(folio) == -EEXIST)
2533 			goto repeat;
2534 	}
2535 
2536 	folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0);
2537 	if (IS_ERR(folio)) {
2538 		error = PTR_ERR(folio);
2539 		if (error == -EEXIST)
2540 			goto repeat;
2541 		folio = NULL;
2542 		goto unlock;
2543 	}
2544 
2545 alloced:
2546 	alloced = true;
2547 	if (folio_test_large(folio) &&
2548 	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2549 					folio_next_index(folio)) {
2550 		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2551 		struct shmem_inode_info *info = SHMEM_I(inode);
2552 		/*
2553 		 * Part of the large folio is beyond i_size: subject
2554 		 * to shrink under memory pressure.
2555 		 */
2556 		spin_lock(&sbinfo->shrinklist_lock);
2557 		/*
2558 		 * _careful to defend against unlocked access to
2559 		 * ->shrink_list in shmem_unused_huge_shrink()
2560 		 */
2561 		if (list_empty_careful(&info->shrinklist)) {
2562 			list_add_tail(&info->shrinklist,
2563 				      &sbinfo->shrinklist);
2564 			sbinfo->shrinklist_len++;
2565 		}
2566 		spin_unlock(&sbinfo->shrinklist_lock);
2567 	}
2568 
2569 	if (sgp == SGP_WRITE)
2570 		folio_set_referenced(folio);
2571 	/*
2572 	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
2573 	 */
2574 	if (sgp == SGP_FALLOC)
2575 		sgp = SGP_WRITE;
2576 clear:
2577 	/*
2578 	 * Let SGP_WRITE caller clear ends if write does not fill folio;
2579 	 * but SGP_FALLOC on a folio fallocated earlier must initialize
2580 	 * it now, lest undo on failure cancel our earlier guarantee.
2581 	 */
2582 	if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
2583 		long i, n = folio_nr_pages(folio);
2584 
2585 		for (i = 0; i < n; i++)
2586 			clear_highpage(folio_page(folio, i));
2587 		flush_dcache_folio(folio);
2588 		folio_mark_uptodate(folio);
2589 	}
2590 
2591 	/* Perhaps the file has been truncated since we checked */
2592 	if (sgp <= SGP_CACHE &&
2593 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2594 		error = -EINVAL;
2595 		goto unlock;
2596 	}
2597 out:
2598 	*foliop = folio;
2599 	return 0;
2600 
2601 	/*
2602 	 * Error recovery.
2603 	 */
2604 unlock:
2605 	if (alloced)
2606 		filemap_remove_folio(folio);
2607 	shmem_recalc_inode(inode, 0, 0);
2608 	if (folio) {
2609 		folio_unlock(folio);
2610 		folio_put(folio);
2611 	}
2612 	return error;
2613 }
2614 
2615 /**
2616  * shmem_get_folio - find, and lock a shmem folio.
2617  * @inode:	inode to search
2618  * @index:	the page index.
2619  * @write_end:	end of a write, could extend inode size
2620  * @foliop:	pointer to the folio if found
2621  * @sgp:	SGP_* flags to control behavior
2622  *
2623  * Looks up the page cache entry at @inode & @index.  If a folio is
2624  * present, it is returned locked with an increased refcount.
2625  *
2626  * If the caller modifies data in the folio, it must call folio_mark_dirty()
2627  * before unlocking the folio to ensure that the folio is not reclaimed.
2628  * There is no need to reserve space before calling folio_mark_dirty().
2629  *
2630  * When no folio is found, the behavior depends on @sgp:
2631  *  - for SGP_READ, *@foliop is %NULL and 0 is returned
2632  *  - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned
2633  *  - for all other flags a new folio is allocated, inserted into the
2634  *    page cache and returned locked in @foliop.
2635  *
2636  * Context: May sleep.
2637  * Return: 0 if successful, else a negative error code.
2638  */
2639 int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
2640 		    struct folio **foliop, enum sgp_type sgp)
2641 {
2642 	return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp,
2643 			mapping_gfp_mask(inode->i_mapping), NULL, NULL);
2644 }
2645 EXPORT_SYMBOL_GPL(shmem_get_folio);
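
/*
 * Editorial usage sketch (not part of the original source), following the
 * kernel-doc above; a hypothetical in-kernel caller might look like:
 *
 *	struct folio *folio;
 *	int err;
 *
 *	err = shmem_get_folio(inode, index, 0, &folio, SGP_CACHE);
 *	if (err)
 *		return err;
 *	... write into the locked folio ...
 *	folio_mark_dirty(folio);
 *	folio_unlock(folio);
 *	folio_put(folio);
 */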
2646 
2647 /*
2648  * This is like autoremove_wake_function, but it removes the wait queue
2649  * entry unconditionally - even if something else had already woken the
2650  * target.
2651  */
2652 static int synchronous_wake_function(wait_queue_entry_t *wait,
2653 			unsigned int mode, int sync, void *key)
2654 {
2655 	int ret = default_wake_function(wait, mode, sync, key);
2656 	list_del_init(&wait->entry);
2657 	return ret;
2658 }
2659 
2660 /*
2661  * Trinity finds that probing a hole which tmpfs is punching can
2662  * prevent the hole-punch from ever completing: which in turn
2663  * locks writers out with its hold on i_rwsem.  So refrain from
2664  * faulting pages into the hole while it's being punched.  Although
2665  * shmem_undo_range() does remove the additions, it may be unable to
2666  * keep up, as each new page needs its own unmap_mapping_range() call,
2667  * and the i_mmap tree grows ever slower to scan if new vmas are added.
2668  *
2669  * It does not matter if we sometimes reach this check just before the
2670  * hole-punch begins, so that one fault then races with the punch:
2671  * we just need to make racing faults a rare case.
2672  *
2673  * The implementation below would be much simpler if we just used a
2674  * standard mutex or completion: but we cannot take i_rwsem in fault,
2675  * and bloating every shmem inode for this unlikely case would be sad.
2676  */
2677 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
2678 {
2679 	struct shmem_falloc *shmem_falloc;
2680 	struct file *fpin = NULL;
2681 	vm_fault_t ret = 0;
2682 
2683 	spin_lock(&inode->i_lock);
2684 	shmem_falloc = inode->i_private;
2685 	if (shmem_falloc &&
2686 	    shmem_falloc->waitq &&
2687 	    vmf->pgoff >= shmem_falloc->start &&
2688 	    vmf->pgoff < shmem_falloc->next) {
2689 		wait_queue_head_t *shmem_falloc_waitq;
2690 		DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2691 
2692 		ret = VM_FAULT_NOPAGE;
2693 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2694 		shmem_falloc_waitq = shmem_falloc->waitq;
2695 		prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2696 				TASK_UNINTERRUPTIBLE);
2697 		spin_unlock(&inode->i_lock);
2698 		schedule();
2699 
2700 		/*
2701 		 * shmem_falloc_waitq points into the shmem_fallocate()
2702 		 * stack of the hole-punching task: shmem_falloc_waitq
2703 		 * is usually invalid by the time we reach here, but
2704 		 * finish_wait() does not dereference it in that case;
2705 		 * though i_lock needed lest racing with wake_up_all().
2706 		 */
2707 		spin_lock(&inode->i_lock);
2708 		finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2709 	}
2710 	spin_unlock(&inode->i_lock);
2711 	if (fpin) {
2712 		fput(fpin);
2713 		ret = VM_FAULT_RETRY;
2714 	}
2715 	return ret;
2716 }
2717 
2718 static vm_fault_t shmem_fault(struct vm_fault *vmf)
2719 {
2720 	struct inode *inode = file_inode(vmf->vma->vm_file);
2721 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
2722 	struct folio *folio = NULL;
2723 	vm_fault_t ret = 0;
2724 	int err;
2725 
2726 	/*
2727 	 * Trinity finds that probing a hole which tmpfs is punching can
2728 	 * prevent the hole-punch from ever completing: noted in i_private.
2729 	 */
2730 	if (unlikely(inode->i_private)) {
2731 		ret = shmem_falloc_wait(vmf, inode);
2732 		if (ret)
2733 			return ret;
2734 	}
2735 
2736 	WARN_ON_ONCE(vmf->page != NULL);
2737 	err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE,
2738 				  gfp, vmf, &ret);
2739 	if (err)
2740 		return vmf_error(err);
2741 	if (folio) {
2742 		vmf->page = folio_file_page(folio, vmf->pgoff);
2743 		ret |= VM_FAULT_LOCKED;
2744 	}
2745 	return ret;
2746 }
2747 
2748 unsigned long shmem_get_unmapped_area(struct file *file,
2749 				      unsigned long uaddr, unsigned long len,
2750 				      unsigned long pgoff, unsigned long flags)
2751 {
2752 	unsigned long addr;
2753 	unsigned long offset;
2754 	unsigned long inflated_len;
2755 	unsigned long inflated_addr;
2756 	unsigned long inflated_offset;
2757 	unsigned long hpage_size;
2758 
2759 	if (len > TASK_SIZE)
2760 		return -ENOMEM;
2761 
2762 	addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff,
2763 				    flags);
2764 
2765 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
2766 		return addr;
2767 	if (IS_ERR_VALUE(addr))
2768 		return addr;
2769 	if (addr & ~PAGE_MASK)
2770 		return addr;
2771 	if (addr > TASK_SIZE - len)
2772 		return addr;
2773 
2774 	if (shmem_huge == SHMEM_HUGE_DENY)
2775 		return addr;
2776 	if (flags & MAP_FIXED)
2777 		return addr;
2778 	/*
2779 	 * Our priority is to support MAP_SHARED mapped hugely;
2780 	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2781 	 * But if caller specified an address hint and we allocated area there
2782 	 * successfully, respect that as before.
2783 	 */
2784 	if (uaddr == addr)
2785 		return addr;
2786 
2787 	hpage_size = HPAGE_PMD_SIZE;
2788 	if (shmem_huge != SHMEM_HUGE_FORCE) {
2789 		struct super_block *sb;
2790 		unsigned long __maybe_unused hpage_orders;
2791 		int order = 0;
2792 
2793 		if (file) {
2794 			VM_BUG_ON(file->f_op != &shmem_file_operations);
2795 			sb = file_inode(file)->i_sb;
2796 		} else {
2797 			/*
2798 			 * Called directly from mm/mmap.c, or drivers/char/mem.c
2799 			 * for "/dev/zero", to create a shared anonymous object.
2800 			 */
2801 			if (IS_ERR(shm_mnt))
2802 				return addr;
2803 			sb = shm_mnt->mnt_sb;
2804 
2805 			/*
2806 			 * Find the highest mTHP order used for anonymous shmem to
2807 			 * provide a suitable alignment address.
2808 			 */
2809 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2810 			hpage_orders = READ_ONCE(huge_shmem_orders_always);
2811 			hpage_orders |= READ_ONCE(huge_shmem_orders_within_size);
2812 			hpage_orders |= READ_ONCE(huge_shmem_orders_madvise);
2813 			if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER)
2814 				hpage_orders |= READ_ONCE(huge_shmem_orders_inherit);
2815 
2816 			if (hpage_orders > 0) {
2817 				order = highest_order(hpage_orders);
2818 				hpage_size = PAGE_SIZE << order;
2819 			}
2820 #endif
2821 		}
2822 		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order)
2823 			return addr;
2824 	}
2825 
2826 	if (len < hpage_size)
2827 		return addr;
2828 
2829 	offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1);
2830 	if (offset && offset + len < 2 * hpage_size)
2831 		return addr;
2832 	if ((addr & (hpage_size - 1)) == offset)
2833 		return addr;
2834 
2835 	inflated_len = len + hpage_size - PAGE_SIZE;
2836 	if (inflated_len > TASK_SIZE)
2837 		return addr;
2838 	if (inflated_len < len)
2839 		return addr;
2840 
2841 	inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr,
2842 					     inflated_len, 0, flags);
2843 	if (IS_ERR_VALUE(inflated_addr))
2844 		return addr;
2845 	if (inflated_addr & ~PAGE_MASK)
2846 		return addr;
2847 
2848 	inflated_offset = inflated_addr & (hpage_size - 1);
2849 	inflated_addr += offset - inflated_offset;
2850 	if (inflated_offset > offset)
2851 		inflated_addr += hpage_size;
2852 
2853 	if (inflated_addr > TASK_SIZE - len)
2854 		return addr;
2855 	return inflated_addr;
2856 }
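
/*
 * Editorial sketch (not part of the original source): as a worked example
 * with 4KB pages and a 2MB PMD, mapping 4MB of a huge-eligible tmpfs file
 * at pgoff 0 asks the arch helper for 4MB + 2MB - 4KB of address space and
 * then moves the returned address up to the next 2MB boundary (plus the
 * file offset within 2MB, here 0), so the range can be backed by PMD-sized
 * folios; the surplus address space found is simply left unused.
 */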
2857 
2858 #ifdef CONFIG_NUMA
2859 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2860 {
2861 	struct inode *inode = file_inode(vma->vm_file);
2862 	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2863 }
2864 
2865 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2866 					  unsigned long addr, pgoff_t *ilx)
2867 {
2868 	struct inode *inode = file_inode(vma->vm_file);
2869 	pgoff_t index;
2870 
2871 	/*
2872 	 * Bias interleave by inode number to distribute better across nodes;
2873 	 * but this interface is independent of which page order is used, so it
2874 	 * supplies only that bias, letting the caller apply the offset (adjusted
2875 	 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()).
2876 	 */
2877 	*ilx = inode->i_ino;
2878 	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2879 	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2880 }
2881 
2882 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2883 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2884 {
2885 	struct mempolicy *mpol;
2886 
2887 	/* Bias interleave by inode number to distribute better across nodes */
2888 	*ilx = info->vfs_inode.i_ino + (index >> order);
2889 
2890 	mpol = mpol_shared_policy_lookup(&info->policy, index);
2891 	return mpol ? mpol : get_task_policy(current);
2892 }
2893 #else
2894 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
2895 			pgoff_t index, unsigned int order, pgoff_t *ilx)
2896 {
2897 	*ilx = 0;
2898 	return NULL;
2899 }
2900 #endif /* CONFIG_NUMA */
2901 
2902 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
2903 {
2904 	struct inode *inode = file_inode(file);
2905 	struct shmem_inode_info *info = SHMEM_I(inode);
2906 	int retval = -ENOMEM;
2907 
2908 	/*
2909 	 * What serializes the accesses to info->flags?
2910 	 * ipc_lock_object() when called from shmctl_do_lock(),
2911 	 * no serialization needed when called from shm_destroy().
2912 	 */
2913 	if (lock && !(info->flags & VM_LOCKED)) {
2914 		if (!user_shm_lock(inode->i_size, ucounts))
2915 			goto out_nomem;
2916 		info->flags |= VM_LOCKED;
2917 		mapping_set_unevictable(file->f_mapping);
2918 	}
2919 	if (!lock && (info->flags & VM_LOCKED) && ucounts) {
2920 		user_shm_unlock(inode->i_size, ucounts);
2921 		info->flags &= ~VM_LOCKED;
2922 		mapping_clear_unevictable(file->f_mapping);
2923 	}
2924 	retval = 0;
2925 
2926 out_nomem:
2927 	return retval;
2928 }
2929 
2930 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2931 {
2932 	struct inode *inode = file_inode(file);
2933 
2934 	file_accessed(file);
2935 	/* This is anonymous shared memory if it is unlinked at the time of mmap */
2936 	if (inode->i_nlink)
2937 		vma->vm_ops = &shmem_vm_ops;
2938 	else
2939 		vma->vm_ops = &shmem_anon_vm_ops;
2940 	return 0;
2941 }
2942 
2943 static int shmem_file_open(struct inode *inode, struct file *file)
2944 {
2945 	file->f_mode |= FMODE_CAN_ODIRECT;
2946 	return generic_file_open(inode, file);
2947 }
2948 
2949 #ifdef CONFIG_TMPFS_XATTR
2950 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2951 
2952 #if IS_ENABLED(CONFIG_UNICODE)
2953 /*
2954  * shmem_inode_casefold_flags - Deal with casefold file attribute flag
2955  *
2956  * The casefold file attribute needs some special checks. It can only be added to
2957  * an empty dir, and can't be removed from a non-empty dir.
2958  */
2959 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2960 				      struct dentry *dentry, unsigned int *i_flags)
2961 {
2962 	unsigned int old = inode->i_flags;
2963 	struct super_block *sb = inode->i_sb;
2964 
2965 	if (fsflags & FS_CASEFOLD_FL) {
2966 		if (!(old & S_CASEFOLD)) {
2967 			if (!sb->s_encoding)
2968 				return -EOPNOTSUPP;
2969 
2970 			if (!S_ISDIR(inode->i_mode))
2971 				return -ENOTDIR;
2972 
2973 			if (dentry && !simple_empty(dentry))
2974 				return -ENOTEMPTY;
2975 		}
2976 
2977 		*i_flags = *i_flags | S_CASEFOLD;
2978 	} else if (old & S_CASEFOLD) {
2979 		if (dentry && !simple_empty(dentry))
2980 			return -ENOTEMPTY;
2981 	}
2982 
2983 	return 0;
2984 }
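
/*
 * Editorial note (not part of the original source): this path is understood
 * to be reached through the fileattr interface (chattr +F / -F), and only
 * succeeds when the tmpfs instance was mounted with a casefold option so
 * that sb->s_encoding is set; toggling the flag on a non-empty directory
 * fails with -ENOTEMPTY as checked above.
 */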
2985 #else
2986 static int shmem_inode_casefold_flags(struct inode *inode, unsigned int fsflags,
2987 				      struct dentry *dentry, unsigned int *i_flags)
2988 {
2989 	if (fsflags & FS_CASEFOLD_FL)
2990 		return -EOPNOTSUPP;
2991 
2992 	return 0;
2993 }
2994 #endif
2995 
2996 /*
2997  * chattr's fsflags are unrelated to extended attributes,
2998  * but tmpfs has chosen to enable them under the same config option.
2999  */
3000 static int shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3001 {
3002 	unsigned int i_flags = 0;
3003 	int ret;
3004 
3005 	ret = shmem_inode_casefold_flags(inode, fsflags, dentry, &i_flags);
3006 	if (ret)
3007 		return ret;
3008 
3009 	if (fsflags & FS_NOATIME_FL)
3010 		i_flags |= S_NOATIME;
3011 	if (fsflags & FS_APPEND_FL)
3012 		i_flags |= S_APPEND;
3013 	if (fsflags & FS_IMMUTABLE_FL)
3014 		i_flags |= S_IMMUTABLE;
3015 	/*
3016 	 * But FS_NODUMP_FL does not require any action in i_flags.
3017 	 */
3018 	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE | S_CASEFOLD);
3019 
3020 	return 0;
3021 }
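
/*
 * Editorial sketch (not part of the original source): for instance,
 * "chattr +ai" on a tmpfs file would be expected to set FS_APPEND_FL and
 * FS_IMMUTABLE_FL through the fileattr interface, which this helper
 * translates to S_APPEND | S_IMMUTABLE on the in-memory inode; FS_NODUMP_FL
 * is accepted but, as noted above, needs no i_flags bit.
 */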
3022 #else
3023 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags, struct dentry *dentry)
3024 {
3025 }
3026 #define shmem_initxattrs NULL
3027 #endif
3028 
3029 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
3030 {
3031 	return &SHMEM_I(inode)->dir_offsets;
3032 }
3033 
3034 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
3035 					     struct super_block *sb,
3036 					     struct inode *dir, umode_t mode,
3037 					     dev_t dev, unsigned long flags)
3038 {
3039 	struct inode *inode;
3040 	struct shmem_inode_info *info;
3041 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3042 	ino_t ino;
3043 	int err;
3044 
3045 	err = shmem_reserve_inode(sb, &ino);
3046 	if (err)
3047 		return ERR_PTR(err);
3048 
3049 	inode = new_inode(sb);
3050 	if (!inode) {
3051 		shmem_free_inode(sb, 0);
3052 		return ERR_PTR(-ENOSPC);
3053 	}
3054 
3055 	inode->i_ino = ino;
3056 	inode_init_owner(idmap, inode, dir, mode);
3057 	inode->i_blocks = 0;
3058 	simple_inode_init_ts(inode);
3059 	inode->i_generation = get_random_u32();
3060 	info = SHMEM_I(inode);
3061 	memset(info, 0, (char *)inode - (char *)info);
3062 	spin_lock_init(&info->lock);
3063 	atomic_set(&info->stop_eviction, 0);
3064 	info->seals = F_SEAL_SEAL;
3065 	info->flags = flags & VM_NORESERVE;
3066 	info->i_crtime = inode_get_mtime(inode);
3067 	info->fsflags = (dir == NULL) ? 0 :
3068 		SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
3069 	if (info->fsflags)
3070 		shmem_set_inode_flags(inode, info->fsflags, NULL);
3071 	INIT_LIST_HEAD(&info->shrinklist);
3072 	INIT_LIST_HEAD(&info->swaplist);
3073 	simple_xattrs_init(&info->xattrs);
3074 	cache_no_acl(inode);
3075 	if (sbinfo->noswap)
3076 		mapping_set_unevictable(inode->i_mapping);
3077 
3078 	/* Don't consider 'deny' for emergencies and 'force' for testing */
3079 	if (sbinfo->huge)
3080 		mapping_set_large_folios(inode->i_mapping);
3081 
3082 	switch (mode & S_IFMT) {
3083 	default:
3084 		inode->i_op = &shmem_special_inode_operations;
3085 		init_special_inode(inode, mode, dev);
3086 		break;
3087 	case S_IFREG:
3088 		inode->i_mapping->a_ops = &shmem_aops;
3089 		inode->i_op = &shmem_inode_operations;
3090 		inode->i_fop = &shmem_file_operations;
3091 		mpol_shared_policy_init(&info->policy,
3092 					 shmem_get_sbmpol(sbinfo));
3093 		break;
3094 	case S_IFDIR:
3095 		inc_nlink(inode);
3096 		/* Some things misbehave if size == 0 on a directory */
3097 		inode->i_size = 2 * BOGO_DIRENT_SIZE;
3098 		inode->i_op = &shmem_dir_inode_operations;
3099 		inode->i_fop = &simple_offset_dir_operations;
3100 		simple_offset_init(shmem_get_offset_ctx(inode));
3101 		break;
3102 	case S_IFLNK:
3103 		/*
3104 		 * Must not load anything in the rbtree,
3105 		 * mpol_free_shared_policy will not be called.
3106 		 */
3107 		mpol_shared_policy_init(&info->policy, NULL);
3108 		break;
3109 	}
3110 
3111 	lockdep_annotate_inode_mutex_key(inode);
3112 	return inode;
3113 }
3114 
3115 #ifdef CONFIG_TMPFS_QUOTA
3116 static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3117 				     struct super_block *sb, struct inode *dir,
3118 				     umode_t mode, dev_t dev, unsigned long flags)
3119 {
3120 	int err;
3121 	struct inode *inode;
3122 
3123 	inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3124 	if (IS_ERR(inode))
3125 		return inode;
3126 
3127 	err = dquot_initialize(inode);
3128 	if (err)
3129 		goto errout;
3130 
3131 	err = dquot_alloc_inode(inode);
3132 	if (err) {
3133 		dquot_drop(inode);
3134 		goto errout;
3135 	}
3136 	return inode;
3137 
3138 errout:
3139 	inode->i_flags |= S_NOQUOTA;
3140 	iput(inode);
3141 	return ERR_PTR(err);
3142 }
3143 #else
3144 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
3145 				     struct super_block *sb, struct inode *dir,
3146 				     umode_t mode, dev_t dev, unsigned long flags)
3147 {
3148 	return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
3149 }
3150 #endif /* CONFIG_TMPFS_QUOTA */
3151 
3152 #ifdef CONFIG_USERFAULTFD
3153 int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
3154 			   struct vm_area_struct *dst_vma,
3155 			   unsigned long dst_addr,
3156 			   unsigned long src_addr,
3157 			   uffd_flags_t flags,
3158 			   struct folio **foliop)
3159 {
3160 	struct inode *inode = file_inode(dst_vma->vm_file);
3161 	struct shmem_inode_info *info = SHMEM_I(inode);
3162 	struct address_space *mapping = inode->i_mapping;
3163 	gfp_t gfp = mapping_gfp_mask(mapping);
3164 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
3165 	void *page_kaddr;
3166 	struct folio *folio;
3167 	int ret;
3168 	pgoff_t max_off;
3169 
3170 	if (shmem_inode_acct_blocks(inode, 1)) {
3171 		/*
3172 		 * We may have got a page, returned -ENOENT triggering a retry,
3173 		 * and now we find ourselves with -ENOMEM. Release the page, to
3174 		 * avoid a BUG_ON in our caller.
3175 		 */
3176 		if (unlikely(*foliop)) {
3177 			folio_put(*foliop);
3178 			*foliop = NULL;
3179 		}
3180 		return -ENOMEM;
3181 	}
3182 
3183 	if (!*foliop) {
3184 		ret = -ENOMEM;
3185 		folio = shmem_alloc_folio(gfp, 0, info, pgoff);
3186 		if (!folio)
3187 			goto out_unacct_blocks;
3188 
3189 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
3190 			page_kaddr = kmap_local_folio(folio, 0);
3191 			/*
3192 			 * The read mmap_lock is held here.  Despite the
3193 			 * mmap_lock being read recursive, a deadlock is still
3194 			 * possible if a writer has taken a lock.  For example:
3195 			 *
3196 			 * process A thread 1 takes read lock on own mmap_lock
3197 			 * process A thread 2 calls mmap, blocks taking write lock
3198 			 * process B thread 1 takes page fault, read lock on own mmap lock
3199 			 * process B thread 2 calls mmap, blocks taking write lock
3200 			 * process A thread 1 blocks taking read lock on process B
3201 			 * process B thread 1 blocks taking read lock on process A
3202 			 *
3203 			 * Disable page faults to prevent potential deadlock
3204 			 * and retry the copy outside the mmap_lock.
3205 			 */
3206 			pagefault_disable();
3207 			ret = copy_from_user(page_kaddr,
3208 					     (const void __user *)src_addr,
3209 					     PAGE_SIZE);
3210 			pagefault_enable();
3211 			kunmap_local(page_kaddr);
3212 
3213 			/* fallback to copy_from_user outside mmap_lock */
3214 			if (unlikely(ret)) {
3215 				*foliop = folio;
3216 				ret = -ENOENT;
3217 				/* don't free the page */
3218 				goto out_unacct_blocks;
3219 			}
3220 
3221 			flush_dcache_folio(folio);
3222 		} else {		/* ZEROPAGE */
3223 			clear_user_highpage(&folio->page, dst_addr);
3224 		}
3225 	} else {
3226 		folio = *foliop;
3227 		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
3228 		*foliop = NULL;
3229 	}
3230 
3231 	VM_BUG_ON(folio_test_locked(folio));
3232 	VM_BUG_ON(folio_test_swapbacked(folio));
3233 	__folio_set_locked(folio);
3234 	__folio_set_swapbacked(folio);
3235 	__folio_mark_uptodate(folio);
3236 
3237 	ret = -EFAULT;
3238 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3239 	if (unlikely(pgoff >= max_off))
3240 		goto out_release;
3241 
3242 	ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
3243 	if (ret)
3244 		goto out_release;
3245 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
3246 	if (ret)
3247 		goto out_release;
3248 
3249 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
3250 				       &folio->page, true, flags);
3251 	if (ret)
3252 		goto out_delete_from_cache;
3253 
3254 	shmem_recalc_inode(inode, 1, 0);
3255 	folio_unlock(folio);
3256 	return 0;
3257 out_delete_from_cache:
3258 	filemap_remove_folio(folio);
3259 out_release:
3260 	folio_unlock(folio);
3261 	folio_put(folio);
3262 out_unacct_blocks:
3263 	shmem_inode_unacct_blocks(inode, 1);
3264 	return ret;
3265 }
3266 #endif /* CONFIG_USERFAULTFD */
3267 
3268 #ifdef CONFIG_TMPFS
3269 static const struct inode_operations shmem_symlink_inode_operations;
3270 static const struct inode_operations shmem_short_symlink_operations;
3271 
3272 static int
3273 shmem_write_begin(const struct kiocb *iocb, struct address_space *mapping,
3274 		  loff_t pos, unsigned len,
3275 		  struct folio **foliop, void **fsdata)
3276 {
3277 	struct inode *inode = mapping->host;
3278 	struct shmem_inode_info *info = SHMEM_I(inode);
3279 	pgoff_t index = pos >> PAGE_SHIFT;
3280 	struct folio *folio;
3281 	int ret = 0;
3282 
3283 	/* i_rwsem is held by caller */
3284 	if (unlikely(info->seals & (F_SEAL_GROW |
3285 				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
3286 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
3287 			return -EPERM;
3288 		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
3289 			return -EPERM;
3290 	}
3291 
3292 	ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE);
3293 	if (ret)
3294 		return ret;
3295 
3296 	if (folio_contain_hwpoisoned_page(folio)) {
3297 		folio_unlock(folio);
3298 		folio_put(folio);
3299 		return -EIO;
3300 	}
3301 
3302 	*foliop = folio;
3303 	return 0;
3304 }
3305 
3306 static int
3307 shmem_write_end(const struct kiocb *iocb, struct address_space *mapping,
3308 		loff_t pos, unsigned len, unsigned copied,
3309 		struct folio *folio, void *fsdata)
3310 {
3311 	struct inode *inode = mapping->host;
3312 
3313 	if (pos + copied > inode->i_size)
3314 		i_size_write(inode, pos + copied);
3315 
3316 	if (!folio_test_uptodate(folio)) {
3317 		if (copied < folio_size(folio)) {
3318 			size_t from = offset_in_folio(folio, pos);
3319 			folio_zero_segments(folio, 0, from,
3320 					from + copied, folio_size(folio));
3321 		}
3322 		folio_mark_uptodate(folio);
3323 	}
3324 	folio_mark_dirty(folio);
3325 	folio_unlock(folio);
3326 	folio_put(folio);
3327 
3328 	return copied;
3329 }
3330 
3331 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3332 {
3333 	struct file *file = iocb->ki_filp;
3334 	struct inode *inode = file_inode(file);
3335 	struct address_space *mapping = inode->i_mapping;
3336 	pgoff_t index;
3337 	unsigned long offset;
3338 	int error = 0;
3339 	ssize_t retval = 0;
3340 
3341 	for (;;) {
3342 		struct folio *folio = NULL;
3343 		struct page *page = NULL;
3344 		unsigned long nr, ret;
3345 		loff_t end_offset, i_size = i_size_read(inode);
3346 		bool fallback_page_copy = false;
3347 		size_t fsize;
3348 
3349 		if (unlikely(iocb->ki_pos >= i_size))
3350 			break;
3351 
3352 		index = iocb->ki_pos >> PAGE_SHIFT;
3353 		error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3354 		if (error) {
3355 			if (error == -EINVAL)
3356 				error = 0;
3357 			break;
3358 		}
3359 		if (folio) {
3360 			folio_unlock(folio);
3361 
3362 			page = folio_file_page(folio, index);
3363 			if (PageHWPoison(page)) {
3364 				folio_put(folio);
3365 				error = -EIO;
3366 				break;
3367 			}
3368 
3369 			if (folio_test_large(folio) &&
3370 			    folio_test_has_hwpoisoned(folio))
3371 				fallback_page_copy = true;
3372 		}
3373 
3374 		/*
3375 		 * We must re-evaluate i_size after getting the folio, since reads
3376 		 * (unlike writes) are called without i_rwsem protection against truncate
3377 		 */
3378 		i_size = i_size_read(inode);
3379 		if (unlikely(iocb->ki_pos >= i_size)) {
3380 			if (folio)
3381 				folio_put(folio);
3382 			break;
3383 		}
3384 		end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count);
3385 		if (folio && likely(!fallback_page_copy))
3386 			fsize = folio_size(folio);
3387 		else
3388 			fsize = PAGE_SIZE;
3389 		offset = iocb->ki_pos & (fsize - 1);
3390 		nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset);
3391 
3392 		if (folio) {
3393 			/*
3394 			 * If users can be writing to this page using arbitrary
3395 			 * virtual addresses, take care about potential aliasing
3396 			 * before reading the page on the kernel side.
3397 			 */
3398 			if (mapping_writably_mapped(mapping)) {
3399 				if (likely(!fallback_page_copy))
3400 					flush_dcache_folio(folio);
3401 				else
3402 					flush_dcache_page(page);
3403 			}
3404 
3405 			/*
3406 			 * Mark the folio accessed if we read the beginning.
3407 			 */
3408 			if (!offset)
3409 				folio_mark_accessed(folio);
3410 			/*
3411 			 * Ok, we have the page, and it's up-to-date, so
3412 			 * now we can copy it to user space...
3413 			 */
3414 			if (likely(!fallback_page_copy))
3415 				ret = copy_folio_to_iter(folio, offset, nr, to);
3416 			else
3417 				ret = copy_page_to_iter(page, offset, nr, to);
3418 			folio_put(folio);
3419 		} else if (user_backed_iter(to)) {
3420 			/*
3421 			 * Copy-to-user tends to be so well optimized, and
3422 			 * clear_user() so much less so, that it is noticeably
3423 			 * faster to copy the zero page than to clear the buffer.
3424 			 */
3425 			ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
3426 		} else {
3427 			/*
3428 			 * But submitting the same page twice in a row to
3429 			 * splice() - or others? - can result in confusion:
3430 			 * so don't attempt that optimization on pipes etc.
3431 			 */
3432 			ret = iov_iter_zero(nr, to);
3433 		}
3434 
3435 		retval += ret;
3436 		iocb->ki_pos += ret;
3437 
3438 		if (!iov_iter_count(to))
3439 			break;
3440 		if (ret < nr) {
3441 			error = -EFAULT;
3442 			break;
3443 		}
3444 		cond_resched();
3445 	}
3446 
3447 	file_accessed(file);
3448 	return retval ? retval : error;
3449 }
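/*
 * A minimal userspace sketch (not part of the kernel source) of the hole
 * behaviour implemented above: SGP_READ returns no folio for a hole, so the
 * read is served from ZERO_PAGE(0) (or iov_iter_zero() for non-user
 * iterators) without allocating memory.  Any tmpfs-backed fd shows it
 * (error handling and includes omitted):
 *
 *	int fd = memfd_create("demo", 0);
 *	char buf[4096];
 *	ftruncate(fd, 1 << 20);			// extend i_size, allocate nothing
 *	ssize_t n = pread(fd, buf, sizeof(buf), 0);
 *	// n == 4096 and buf is all zeroes: the hole never gained a folio.
 */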
3450 
3451 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3452 {
3453 	struct file *file = iocb->ki_filp;
3454 	struct inode *inode = file->f_mapping->host;
3455 	ssize_t ret;
3456 
3457 	inode_lock(inode);
3458 	ret = generic_write_checks(iocb, from);
3459 	if (ret <= 0)
3460 		goto unlock;
3461 	ret = file_remove_privs(file);
3462 	if (ret)
3463 		goto unlock;
3464 	ret = file_update_time(file);
3465 	if (ret)
3466 		goto unlock;
3467 	ret = generic_perform_write(iocb, from);
3468 unlock:
3469 	inode_unlock(inode);
3470 	return ret;
3471 }
3472 
3473 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
3474 			      struct pipe_buffer *buf)
3475 {
3476 	return true;
3477 }
3478 
3479 static void zero_pipe_buf_release(struct pipe_inode_info *pipe,
3480 				  struct pipe_buffer *buf)
3481 {
3482 }
3483 
3484 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe,
3485 				    struct pipe_buffer *buf)
3486 {
3487 	return false;
3488 }
3489 
3490 static const struct pipe_buf_operations zero_pipe_buf_ops = {
3491 	.release	= zero_pipe_buf_release,
3492 	.try_steal	= zero_pipe_buf_try_steal,
3493 	.get		= zero_pipe_buf_get,
3494 };
3495 
3496 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe,
3497 					loff_t fpos, size_t size)
3498 {
3499 	size_t offset = fpos & ~PAGE_MASK;
3500 
3501 	size = min_t(size_t, size, PAGE_SIZE - offset);
3502 
3503 	if (!pipe_is_full(pipe)) {
3504 		struct pipe_buffer *buf = pipe_head_buf(pipe);
3505 
3506 		*buf = (struct pipe_buffer) {
3507 			.ops	= &zero_pipe_buf_ops,
3508 			.page	= ZERO_PAGE(0),
3509 			.offset	= offset,
3510 			.len	= size,
3511 		};
3512 		pipe->head++;
3513 	}
3514 
3515 	return size;
3516 }
3517 
3518 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
3519 				      struct pipe_inode_info *pipe,
3520 				      size_t len, unsigned int flags)
3521 {
3522 	struct inode *inode = file_inode(in);
3523 	struct address_space *mapping = inode->i_mapping;
3524 	struct folio *folio = NULL;
3525 	size_t total_spliced = 0, used, npages, n, part;
3526 	loff_t isize;
3527 	int error = 0;
3528 
3529 	/* Work out how much data we can actually add into the pipe */
3530 	used = pipe_buf_usage(pipe);
3531 	npages = max_t(ssize_t, pipe->max_usage - used, 0);
3532 	len = min_t(size_t, len, npages * PAGE_SIZE);
3533 
3534 	do {
3535 		bool fallback_page_splice = false;
3536 		struct page *page = NULL;
3537 		pgoff_t index;
3538 		size_t size;
3539 
3540 		if (*ppos >= i_size_read(inode))
3541 			break;
3542 
3543 		index = *ppos >> PAGE_SHIFT;
3544 		error = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
3545 		if (error) {
3546 			if (error == -EINVAL)
3547 				error = 0;
3548 			break;
3549 		}
3550 		if (folio) {
3551 			folio_unlock(folio);
3552 
3553 			page = folio_file_page(folio, index);
3554 			if (PageHWPoison(page)) {
3555 				error = -EIO;
3556 				break;
3557 			}
3558 
3559 			if (folio_test_large(folio) &&
3560 			    folio_test_has_hwpoisoned(folio))
3561 				fallback_page_splice = true;
3562 		}
3563 
3564 		/*
3565 		 * i_size must be checked after we know the pages are Uptodate.
3566 		 *
3567 		 * Checking i_size after looking up the folio allows us to
3568 		 * calculate the correct value for "part", which means the
3569 		 * zero-filled part of the page is not spliced into the pipe
3570 		 * (unless another truncate extends the file - this is desired though).
3571 		 */
3572 		isize = i_size_read(inode);
3573 		if (unlikely(*ppos >= isize))
3574 			break;
3575 		/*
3576 		 * Fallback to PAGE_SIZE splice if the large folio has hwpoisoned
3577 		 * pages.
3578 		 */
3579 		size = len;
3580 		if (unlikely(fallback_page_splice)) {
3581 			size_t offset = *ppos & ~PAGE_MASK;
3582 
3583 			size = umin(size, PAGE_SIZE - offset);
3584 		}
3585 		part = min_t(loff_t, isize - *ppos, size);
3586 
3587 		if (folio) {
3588 			/*
3589 			 * If users can be writing to this page using arbitrary
3590 			 * virtual addresses, take care about potential aliasing
3591 			 * before reading the page on the kernel side.
3592 			 */
3593 			if (mapping_writably_mapped(mapping)) {
3594 				if (likely(!fallback_page_splice))
3595 					flush_dcache_folio(folio);
3596 				else
3597 					flush_dcache_page(page);
3598 			}
3599 			folio_mark_accessed(folio);
3600 			/*
3601 			 * Ok, we have the page, and it's up-to-date, so we can
3602 			 * now splice it into the pipe.
3603 			 */
3604 			n = splice_folio_into_pipe(pipe, folio, *ppos, part);
3605 			folio_put(folio);
3606 			folio = NULL;
3607 		} else {
3608 			n = splice_zeropage_into_pipe(pipe, *ppos, part);
3609 		}
3610 
3611 		if (!n)
3612 			break;
3613 		len -= n;
3614 		total_spliced += n;
3615 		*ppos += n;
3616 		in->f_ra.prev_pos = *ppos;
3617 		if (pipe_is_full(pipe))
3618 			break;
3619 
3620 		cond_resched();
3621 	} while (len);
3622 
3623 	if (folio)
3624 		folio_put(folio);
3625 
3626 	file_accessed(in);
3627 	return total_spliced ? total_spliced : error;
3628 }
3629 
3630 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
3631 {
3632 	struct address_space *mapping = file->f_mapping;
3633 	struct inode *inode = mapping->host;
3634 
3635 	if (whence != SEEK_DATA && whence != SEEK_HOLE)
3636 		return generic_file_llseek_size(file, offset, whence,
3637 					MAX_LFS_FILESIZE, i_size_read(inode));
3638 	if (offset < 0)
3639 		return -ENXIO;
3640 
3641 	inode_lock(inode);
3642 	/* We're holding i_rwsem so we can access i_size directly */
3643 	offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
3644 	if (offset >= 0)
3645 		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
3646 	inode_unlock(inode);
3647 	return offset;
3648 }
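/*
 * Usage sketch (not part of the kernel source): because seeking is delegated
 * to mapping_seek_hole_data(), SEEK_DATA/SEEK_HOLE work on tmpfs files just
 * as on disk filesystems, e.g.:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // start of first allocated extent
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // end of that extent
 *
 * Both return -1 with errno == ENXIO when the offset is at or beyond EOF.
 */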
3649 
3650 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
3651 							 loff_t len)
3652 {
3653 	struct inode *inode = file_inode(file);
3654 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3655 	struct shmem_inode_info *info = SHMEM_I(inode);
3656 	struct shmem_falloc shmem_falloc;
3657 	pgoff_t start, index, end, undo_fallocend;
3658 	int error;
3659 
3660 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3661 		return -EOPNOTSUPP;
3662 
3663 	inode_lock(inode);
3664 
3665 	if (mode & FALLOC_FL_PUNCH_HOLE) {
3666 		struct address_space *mapping = file->f_mapping;
3667 		loff_t unmap_start = round_up(offset, PAGE_SIZE);
3668 		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
3669 		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
3670 
3671 		/* protected by i_rwsem */
3672 		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
3673 			error = -EPERM;
3674 			goto out;
3675 		}
3676 
3677 		shmem_falloc.waitq = &shmem_falloc_waitq;
3678 		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
3679 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
3680 		spin_lock(&inode->i_lock);
3681 		inode->i_private = &shmem_falloc;
3682 		spin_unlock(&inode->i_lock);
3683 
3684 		if ((u64)unmap_end > (u64)unmap_start)
3685 			unmap_mapping_range(mapping, unmap_start,
3686 					    1 + unmap_end - unmap_start, 0);
3687 		shmem_truncate_range(inode, offset, offset + len - 1);
3688 		/* No need to unmap again: hole-punching leaves COWed pages */
3689 
3690 		spin_lock(&inode->i_lock);
3691 		inode->i_private = NULL;
3692 		wake_up_all(&shmem_falloc_waitq);
3693 		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
3694 		spin_unlock(&inode->i_lock);
3695 		error = 0;
3696 		goto out;
3697 	}
3698 
3699 	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
3700 	error = inode_newsize_ok(inode, offset + len);
3701 	if (error)
3702 		goto out;
3703 
3704 	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3705 		error = -EPERM;
3706 		goto out;
3707 	}
3708 
3709 	start = offset >> PAGE_SHIFT;
3710 	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3711 	/* Try to avoid a swapstorm if len is impossible to satisfy */
3712 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
3713 		error = -ENOSPC;
3714 		goto out;
3715 	}
3716 
3717 	shmem_falloc.waitq = NULL;
3718 	shmem_falloc.start = start;
3719 	shmem_falloc.next  = start;
3720 	shmem_falloc.nr_falloced = 0;
3721 	shmem_falloc.nr_unswapped = 0;
3722 	spin_lock(&inode->i_lock);
3723 	inode->i_private = &shmem_falloc;
3724 	spin_unlock(&inode->i_lock);
3725 
3726 	/*
3727 	 * info->fallocend is only relevant when huge pages might be
3728 	 * involved: to prevent split_huge_page() freeing fallocated
3729 	 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
3730 	 */
3731 	undo_fallocend = info->fallocend;
3732 	if (info->fallocend < end)
3733 		info->fallocend = end;
3734 
3735 	for (index = start; index < end; ) {
3736 		struct folio *folio;
3737 
3738 		/*
3739 		 * Check for fatal signal so that we abort early in OOM
3740 		 * situations. We don't want to abort in case of non-fatal
3741 		 * signals as large fallocate can take noticeable time and
3742 		 * e.g. periodic timers may result in fallocate constantly
3743 		 * restarting.
3744 		 */
3745 		if (fatal_signal_pending(current))
3746 			error = -EINTR;
3747 		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
3748 			error = -ENOMEM;
3749 		else
3750 			error = shmem_get_folio(inode, index, offset + len,
3751 						&folio, SGP_FALLOC);
3752 		if (error) {
3753 			info->fallocend = undo_fallocend;
3754 			/* Remove the !uptodate folios we added */
3755 			if (index > start) {
3756 				shmem_undo_range(inode,
3757 				    (loff_t)start << PAGE_SHIFT,
3758 				    ((loff_t)index << PAGE_SHIFT) - 1, true);
3759 			}
3760 			goto undone;
3761 		}
3762 
3763 		/*
3764 		 * Here is a more important optimization than it appears:
3765 		 * a second SGP_FALLOC on the same large folio will clear it,
3766 		 * making it uptodate and un-undoable if we fail later.
3767 		 */
3768 		index = folio_next_index(folio);
3769 		/* Beware 32-bit wraparound */
3770 		if (!index)
3771 			index--;
3772 
3773 		/*
3774 		 * Inform shmem_writeout() how far we have reached.
3775 		 * No need for lock or barrier: we have the page lock.
3776 		 */
3777 		if (!folio_test_uptodate(folio))
3778 			shmem_falloc.nr_falloced += index - shmem_falloc.next;
3779 		shmem_falloc.next = index;
3780 
3781 		/*
3782 		 * If !uptodate, leave it that way so that freeable folios
3783 		 * can be recognized if we need to rollback on error later.
3784 		 * But mark it dirty so that memory pressure will swap rather
3785 		 * than free the folios we are allocating (and SGP_CACHE folios
3786 		 * might still be clean: we now need to mark those dirty too).
3787 		 */
3788 		folio_mark_dirty(folio);
3789 		folio_unlock(folio);
3790 		folio_put(folio);
3791 		cond_resched();
3792 	}
3793 
3794 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
3795 		i_size_write(inode, offset + len);
3796 undone:
3797 	spin_lock(&inode->i_lock);
3798 	inode->i_private = NULL;
3799 	spin_unlock(&inode->i_lock);
3800 out:
3801 	if (!error)
3802 		file_modified(file);
3803 	inode_unlock(inode);
3804 	return error;
3805 }
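/*
 * Userspace sketch (not part of the kernel source) of the two modes handled
 * above: preallocation, and hole punching (which must be combined with
 * FALLOC_FL_KEEP_SIZE):
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// preallocate 1 MiB
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 4096);				// free the first page again
 *
 * Punching returns the pages to the system, and subsequent reads of the
 * range see zeroes.
 */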
3806 
3807 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3808 {
3809 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3810 
3811 	buf->f_type = TMPFS_MAGIC;
3812 	buf->f_bsize = PAGE_SIZE;
3813 	buf->f_namelen = NAME_MAX;
3814 	if (sbinfo->max_blocks) {
3815 		buf->f_blocks = sbinfo->max_blocks;
3816 		buf->f_bavail =
3817 		buf->f_bfree  = sbinfo->max_blocks -
3818 				percpu_counter_sum(&sbinfo->used_blocks);
3819 	}
3820 	if (sbinfo->max_inodes) {
3821 		buf->f_files = sbinfo->max_inodes;
3822 		buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
3823 	}
3824 	/* else leave those fields 0 like simple_statfs */
3825 
3826 	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
3827 
3828 	return 0;
3829 }
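/*
 * The fields filled in above are what statfs(2)/df report for a tmpfs mount.
 * Sketch (not part of the kernel source; /dev/shm is assumed to be a tmpfs
 * mount, as on most distributions):
 *
 *	struct statfs st;
 *	statfs("/dev/shm", &st);
 *	// st.f_bsize  == PAGE_SIZE
 *	// st.f_blocks == size limit in pages (left 0 if the mount is unlimited)
 *	// st.f_bfree  == f_blocks minus pages currently in use
 */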
3830 
3831 /*
3832  * File creation. Allocate an inode, and we're done..
3833  */
3834 static int
3835 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
3836 	    struct dentry *dentry, umode_t mode, dev_t dev)
3837 {
3838 	struct inode *inode;
3839 	int error;
3840 
3841 	if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
3842 		return -EINVAL;
3843 
3844 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
3845 	if (IS_ERR(inode))
3846 		return PTR_ERR(inode);
3847 
3848 	error = simple_acl_create(dir, inode);
3849 	if (error)
3850 		goto out_iput;
3851 	error = security_inode_init_security(inode, dir, &dentry->d_name,
3852 					     shmem_initxattrs, NULL);
3853 	if (error && error != -EOPNOTSUPP)
3854 		goto out_iput;
3855 
3856 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3857 	if (error)
3858 		goto out_iput;
3859 
3860 	dir->i_size += BOGO_DIRENT_SIZE;
3861 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
3862 	inode_inc_iversion(dir);
3863 
3864 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3865 		d_add(dentry, inode);
3866 	else
3867 		d_instantiate(dentry, inode);
3868 
3869 	dget(dentry); /* Extra count - pin the dentry in core */
3870 	return error;
3871 
3872 out_iput:
3873 	iput(inode);
3874 	return error;
3875 }
3876 
3877 static int
3878 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
3879 	      struct file *file, umode_t mode)
3880 {
3881 	struct inode *inode;
3882 	int error;
3883 
3884 	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
3885 	if (IS_ERR(inode)) {
3886 		error = PTR_ERR(inode);
3887 		goto err_out;
3888 	}
3889 	error = security_inode_init_security(inode, dir, NULL,
3890 					     shmem_initxattrs, NULL);
3891 	if (error && error != -EOPNOTSUPP)
3892 		goto out_iput;
3893 	error = simple_acl_create(dir, inode);
3894 	if (error)
3895 		goto out_iput;
3896 	d_tmpfile(file, inode);
3897 
3898 err_out:
3899 	return finish_open_simple(file, error);
3900 out_iput:
3901 	iput(inode);
3902 	return error;
3903 }
3904 
3905 static struct dentry *shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
3906 				  struct dentry *dentry, umode_t mode)
3907 {
3908 	int error;
3909 
3910 	error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
3911 	if (error)
3912 		return ERR_PTR(error);
3913 	inc_nlink(dir);
3914 	return NULL;
3915 }
3916 
3917 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
3918 			struct dentry *dentry, umode_t mode, bool excl)
3919 {
3920 	return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
3921 }
3922 
3923 /*
3924  * Link a file..
3925  */
3926 static int shmem_link(struct dentry *old_dentry, struct inode *dir,
3927 		      struct dentry *dentry)
3928 {
3929 	struct inode *inode = d_inode(old_dentry);
3930 	int ret = 0;
3931 
3932 	/*
3933 	 * No ordinary (disk based) filesystem counts links as inodes;
3934 	 * but each new link needs a new dentry, pinning lowmem, and
3935 	 * tmpfs dentries cannot be pruned until they are unlinked.
3936 	 * But if an O_TMPFILE file is linked into the tmpfs, the
3937 	 * first link must skip that, to get the accounting right.
3938 	 */
3939 	if (inode->i_nlink) {
3940 		ret = shmem_reserve_inode(inode->i_sb, NULL);
3941 		if (ret)
3942 			goto out;
3943 	}
3944 
3945 	ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
3946 	if (ret) {
3947 		if (inode->i_nlink)
3948 			shmem_free_inode(inode->i_sb, 0);
3949 		goto out;
3950 	}
3951 
3952 	dir->i_size += BOGO_DIRENT_SIZE;
3953 	inode_set_mtime_to_ts(dir,
3954 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3955 	inode_inc_iversion(dir);
3956 	inc_nlink(inode);
3957 	ihold(inode);	/* New dentry reference */
3958 	dget(dentry);	/* Extra pinning count for the created dentry */
3959 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3960 		d_add(dentry, inode);
3961 	else
3962 		d_instantiate(dentry, inode);
3963 out:
3964 	return ret;
3965 }
3966 
3967 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3968 {
3969 	struct inode *inode = d_inode(dentry);
3970 
3971 	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3972 		shmem_free_inode(inode->i_sb, 0);
3973 
3974 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
3975 
3976 	dir->i_size -= BOGO_DIRENT_SIZE;
3977 	inode_set_mtime_to_ts(dir,
3978 			      inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
3979 	inode_inc_iversion(dir);
3980 	drop_nlink(inode);
3981 	dput(dentry);	/* Undo the count from "create" - does all the work */
3982 
3983 	/*
3984 	 * For now, VFS can't deal with case-insensitive negative dentries, so
3985 	 * we invalidate them
3986 	 */
3987 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
3988 		d_invalidate(dentry);
3989 
3990 	return 0;
3991 }
3992 
3993 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3994 {
3995 	if (!simple_empty(dentry))
3996 		return -ENOTEMPTY;
3997 
3998 	drop_nlink(d_inode(dentry));
3999 	drop_nlink(dir);
4000 	return shmem_unlink(dir, dentry);
4001 }
4002 
4003 static int shmem_whiteout(struct mnt_idmap *idmap,
4004 			  struct inode *old_dir, struct dentry *old_dentry)
4005 {
4006 	struct dentry *whiteout;
4007 	int error;
4008 
4009 	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
4010 	if (!whiteout)
4011 		return -ENOMEM;
4012 
4013 	error = shmem_mknod(idmap, old_dir, whiteout,
4014 			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
4015 	dput(whiteout);
4016 	if (error)
4017 		return error;
4018 
4019 	/*
4020 	 * Cheat and hash the whiteout while the old dentry is still in
4021 	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
4022 	 *
4023 	 * d_lookup() will consistently find one of them at this point,
4024 	 * not sure which one, but that isn't even important.
4025 	 */
4026 	d_rehash(whiteout);
4027 	return 0;
4028 }
4029 
4030 /*
4031  * The VFS layer already does all the dentry stuff for rename,
4032  * we just have to decrement the usage count for the target if
4033  * it exists so that the VFS layer correctly free's it when it
4034  * gets overwritten.
4035  */
4036 static int shmem_rename2(struct mnt_idmap *idmap,
4037 			 struct inode *old_dir, struct dentry *old_dentry,
4038 			 struct inode *new_dir, struct dentry *new_dentry,
4039 			 unsigned int flags)
4040 {
4041 	struct inode *inode = d_inode(old_dentry);
4042 	int they_are_dirs = S_ISDIR(inode->i_mode);
4043 	int error;
4044 
4045 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
4046 		return -EINVAL;
4047 
4048 	if (flags & RENAME_EXCHANGE)
4049 		return simple_offset_rename_exchange(old_dir, old_dentry,
4050 						     new_dir, new_dentry);
4051 
4052 	if (!simple_empty(new_dentry))
4053 		return -ENOTEMPTY;
4054 
4055 	if (flags & RENAME_WHITEOUT) {
4056 		error = shmem_whiteout(idmap, old_dir, old_dentry);
4057 		if (error)
4058 			return error;
4059 	}
4060 
4061 	error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
4062 	if (error)
4063 		return error;
4064 
4065 	if (d_really_is_positive(new_dentry)) {
4066 		(void) shmem_unlink(new_dir, new_dentry);
4067 		if (they_are_dirs) {
4068 			drop_nlink(d_inode(new_dentry));
4069 			drop_nlink(old_dir);
4070 		}
4071 	} else if (they_are_dirs) {
4072 		drop_nlink(old_dir);
4073 		inc_nlink(new_dir);
4074 	}
4075 
4076 	old_dir->i_size -= BOGO_DIRENT_SIZE;
4077 	new_dir->i_size += BOGO_DIRENT_SIZE;
4078 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
4079 	inode_inc_iversion(old_dir);
4080 	inode_inc_iversion(new_dir);
4081 	return 0;
4082 }
4083 
4084 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
4085 			 struct dentry *dentry, const char *symname)
4086 {
4087 	int error;
4088 	int len;
4089 	struct inode *inode;
4090 	struct folio *folio;
4091 	char *link;
4092 
4093 	len = strlen(symname) + 1;
4094 	if (len > PAGE_SIZE)
4095 		return -ENAMETOOLONG;
4096 
4097 	inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
4098 				VM_NORESERVE);
4099 	if (IS_ERR(inode))
4100 		return PTR_ERR(inode);
4101 
4102 	error = security_inode_init_security(inode, dir, &dentry->d_name,
4103 					     shmem_initxattrs, NULL);
4104 	if (error && error != -EOPNOTSUPP)
4105 		goto out_iput;
4106 
4107 	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
4108 	if (error)
4109 		goto out_iput;
4110 
4111 	inode->i_size = len-1;
4112 	if (len <= SHORT_SYMLINK_LEN) {
4113 		link = kmemdup(symname, len, GFP_KERNEL);
4114 		if (!link) {
4115 			error = -ENOMEM;
4116 			goto out_remove_offset;
4117 		}
4118 		inode->i_op = &shmem_short_symlink_operations;
4119 		inode_set_cached_link(inode, link, len - 1);
4120 	} else {
4121 		inode_nohighmem(inode);
4122 		inode->i_mapping->a_ops = &shmem_aops;
4123 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE);
4124 		if (error)
4125 			goto out_remove_offset;
4126 		inode->i_op = &shmem_symlink_inode_operations;
4127 		memcpy(folio_address(folio), symname, len);
4128 		folio_mark_uptodate(folio);
4129 		folio_mark_dirty(folio);
4130 		folio_unlock(folio);
4131 		folio_put(folio);
4132 	}
4133 	dir->i_size += BOGO_DIRENT_SIZE;
4134 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
4135 	inode_inc_iversion(dir);
4136 	if (IS_ENABLED(CONFIG_UNICODE) && IS_CASEFOLDED(dir))
4137 		d_add(dentry, inode);
4138 	else
4139 		d_instantiate(dentry, inode);
4140 	dget(dentry);
4141 	return 0;
4142 
4143 out_remove_offset:
4144 	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
4145 out_iput:
4146 	iput(inode);
4147 	return error;
4148 }
4149 
4150 static void shmem_put_link(void *arg)
4151 {
4152 	folio_mark_accessed(arg);
4153 	folio_put(arg);
4154 }
4155 
4156 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
4157 				  struct delayed_call *done)
4158 {
4159 	struct folio *folio = NULL;
4160 	int error;
4161 
4162 	if (!dentry) {
4163 		folio = filemap_get_folio(inode->i_mapping, 0);
4164 		if (IS_ERR(folio))
4165 			return ERR_PTR(-ECHILD);
4166 		if (PageHWPoison(folio_page(folio, 0)) ||
4167 		    !folio_test_uptodate(folio)) {
4168 			folio_put(folio);
4169 			return ERR_PTR(-ECHILD);
4170 		}
4171 	} else {
4172 		error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ);
4173 		if (error)
4174 			return ERR_PTR(error);
4175 		if (!folio)
4176 			return ERR_PTR(-ECHILD);
4177 		if (PageHWPoison(folio_page(folio, 0))) {
4178 			folio_unlock(folio);
4179 			folio_put(folio);
4180 			return ERR_PTR(-ECHILD);
4181 		}
4182 		folio_unlock(folio);
4183 	}
4184 	set_delayed_call(done, shmem_put_link, folio);
4185 	return folio_address(folio);
4186 }
4187 
4188 #ifdef CONFIG_TMPFS_XATTR
4189 
4190 static int shmem_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
4191 {
4192 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4193 
4194 	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);
4195 
4196 	return 0;
4197 }
4198 
4199 static int shmem_fileattr_set(struct mnt_idmap *idmap,
4200 			      struct dentry *dentry, struct file_kattr *fa)
4201 {
4202 	struct inode *inode = d_inode(dentry);
4203 	struct shmem_inode_info *info = SHMEM_I(inode);
4204 	int ret, flags;
4205 
4206 	if (fileattr_has_fsx(fa))
4207 		return -EOPNOTSUPP;
4208 	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
4209 		return -EOPNOTSUPP;
4210 
4211 	flags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
4212 		(fa->flags & SHMEM_FL_USER_MODIFIABLE);
4213 
4214 	ret = shmem_set_inode_flags(inode, flags, dentry);
4215 
4216 	if (ret)
4217 		return ret;
4218 
4219 	info->fsflags = flags;
4220 
4221 	inode_set_ctime_current(inode);
4222 	inode_inc_iversion(inode);
4223 	return 0;
4224 }
4225 
4226 /*
4227  * Superblocks without xattr inode operations may get some security.* xattr
4228  * support from the LSM "for free". As soon as we have any other xattrs
4229  * like ACLs, we also need to implement the security.* handlers at
4230  * filesystem level, though.
4231  */
4232 
4233 /*
4234  * Callback for security_inode_init_security() for acquiring xattrs.
4235  */
4236 static int shmem_initxattrs(struct inode *inode,
4237 			    const struct xattr *xattr_array, void *fs_info)
4238 {
4239 	struct shmem_inode_info *info = SHMEM_I(inode);
4240 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4241 	const struct xattr *xattr;
4242 	struct simple_xattr *new_xattr;
4243 	size_t ispace = 0;
4244 	size_t len;
4245 
4246 	if (sbinfo->max_inodes) {
4247 		for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4248 			ispace += simple_xattr_space(xattr->name,
4249 				xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
4250 		}
4251 		if (ispace) {
4252 			raw_spin_lock(&sbinfo->stat_lock);
4253 			if (sbinfo->free_ispace < ispace)
4254 				ispace = 0;
4255 			else
4256 				sbinfo->free_ispace -= ispace;
4257 			raw_spin_unlock(&sbinfo->stat_lock);
4258 			if (!ispace)
4259 				return -ENOSPC;
4260 		}
4261 	}
4262 
4263 	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
4264 		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
4265 		if (!new_xattr)
4266 			break;
4267 
4268 		len = strlen(xattr->name) + 1;
4269 		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
4270 					  GFP_KERNEL_ACCOUNT);
4271 		if (!new_xattr->name) {
4272 			kvfree(new_xattr);
4273 			break;
4274 		}
4275 
4276 		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
4277 		       XATTR_SECURITY_PREFIX_LEN);
4278 		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
4279 		       xattr->name, len);
4280 
4281 		simple_xattr_add(&info->xattrs, new_xattr);
4282 	}
4283 
4284 	if (xattr->name != NULL) {
4285 		if (ispace) {
4286 			raw_spin_lock(&sbinfo->stat_lock);
4287 			sbinfo->free_ispace += ispace;
4288 			raw_spin_unlock(&sbinfo->stat_lock);
4289 		}
4290 		simple_xattrs_free(&info->xattrs, NULL);
4291 		return -ENOMEM;
4292 	}
4293 
4294 	return 0;
4295 }
4296 
4297 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
4298 				   struct dentry *unused, struct inode *inode,
4299 				   const char *name, void *buffer, size_t size)
4300 {
4301 	struct shmem_inode_info *info = SHMEM_I(inode);
4302 
4303 	name = xattr_full_name(handler, name);
4304 	return simple_xattr_get(&info->xattrs, name, buffer, size);
4305 }
4306 
4307 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
4308 				   struct mnt_idmap *idmap,
4309 				   struct dentry *unused, struct inode *inode,
4310 				   const char *name, const void *value,
4311 				   size_t size, int flags)
4312 {
4313 	struct shmem_inode_info *info = SHMEM_I(inode);
4314 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4315 	struct simple_xattr *old_xattr;
4316 	size_t ispace = 0;
4317 
4318 	name = xattr_full_name(handler, name);
4319 	if (value && sbinfo->max_inodes) {
4320 		ispace = simple_xattr_space(name, size);
4321 		raw_spin_lock(&sbinfo->stat_lock);
4322 		if (sbinfo->free_ispace < ispace)
4323 			ispace = 0;
4324 		else
4325 			sbinfo->free_ispace -= ispace;
4326 		raw_spin_unlock(&sbinfo->stat_lock);
4327 		if (!ispace)
4328 			return -ENOSPC;
4329 	}
4330 
4331 	old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
4332 	if (!IS_ERR(old_xattr)) {
4333 		ispace = 0;
4334 		if (old_xattr && sbinfo->max_inodes)
4335 			ispace = simple_xattr_space(old_xattr->name,
4336 						    old_xattr->size);
4337 		simple_xattr_free(old_xattr);
4338 		old_xattr = NULL;
4339 		inode_set_ctime_current(inode);
4340 		inode_inc_iversion(inode);
4341 	}
4342 	if (ispace) {
4343 		raw_spin_lock(&sbinfo->stat_lock);
4344 		sbinfo->free_ispace += ispace;
4345 		raw_spin_unlock(&sbinfo->stat_lock);
4346 	}
4347 	return PTR_ERR(old_xattr);
4348 }
4349 
4350 static const struct xattr_handler shmem_security_xattr_handler = {
4351 	.prefix = XATTR_SECURITY_PREFIX,
4352 	.get = shmem_xattr_handler_get,
4353 	.set = shmem_xattr_handler_set,
4354 };
4355 
4356 static const struct xattr_handler shmem_trusted_xattr_handler = {
4357 	.prefix = XATTR_TRUSTED_PREFIX,
4358 	.get = shmem_xattr_handler_get,
4359 	.set = shmem_xattr_handler_set,
4360 };
4361 
4362 static const struct xattr_handler shmem_user_xattr_handler = {
4363 	.prefix = XATTR_USER_PREFIX,
4364 	.get = shmem_xattr_handler_get,
4365 	.set = shmem_xattr_handler_set,
4366 };
4367 
4368 static const struct xattr_handler * const shmem_xattr_handlers[] = {
4369 	&shmem_security_xattr_handler,
4370 	&shmem_trusted_xattr_handler,
4371 	&shmem_user_xattr_handler,
4372 	NULL
4373 };
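/*
 * With these handlers in place, the usual xattr syscalls work on tmpfs when
 * CONFIG_TMPFS_XATTR is enabled.  Sketch (not part of the kernel source;
 * the path is hypothetical):
 *
 *	setxattr("/mnt/scratch/file", "user.origin", "generated", 9, 0);
 *	getxattr("/mnt/scratch/file", "user.origin", buf, sizeof(buf));
 *
 * The values live in the in-memory simple_xattrs list and, on mounts with
 * nr_inodes set, are charged against free_ispace as seen in
 * shmem_xattr_handler_set() above.
 */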
4374 
4375 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
4376 {
4377 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
4378 	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
4379 }
4380 #endif /* CONFIG_TMPFS_XATTR */
4381 
4382 static const struct inode_operations shmem_short_symlink_operations = {
4383 	.getattr	= shmem_getattr,
4384 	.setattr	= shmem_setattr,
4385 	.get_link	= simple_get_link,
4386 #ifdef CONFIG_TMPFS_XATTR
4387 	.listxattr	= shmem_listxattr,
4388 #endif
4389 };
4390 
4391 static const struct inode_operations shmem_symlink_inode_operations = {
4392 	.getattr	= shmem_getattr,
4393 	.setattr	= shmem_setattr,
4394 	.get_link	= shmem_get_link,
4395 #ifdef CONFIG_TMPFS_XATTR
4396 	.listxattr	= shmem_listxattr,
4397 #endif
4398 };
4399 
4400 static struct dentry *shmem_get_parent(struct dentry *child)
4401 {
4402 	return ERR_PTR(-ESTALE);
4403 }
4404 
4405 static int shmem_match(struct inode *ino, void *vfh)
4406 {
4407 	__u32 *fh = vfh;
4408 	__u64 inum = fh[2];
4409 	inum = (inum << 32) | fh[1];
4410 	return ino->i_ino == inum && fh[0] == ino->i_generation;
4411 }
4412 
4413 /* Find any alias of inode, but prefer a hashed alias */
4414 static struct dentry *shmem_find_alias(struct inode *inode)
4415 {
4416 	struct dentry *alias = d_find_alias(inode);
4417 
4418 	return alias ?: d_find_any_alias(inode);
4419 }
4420 
4421 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
4422 		struct fid *fid, int fh_len, int fh_type)
4423 {
4424 	struct inode *inode;
4425 	struct dentry *dentry = NULL;
4426 	u64 inum;
4427 
4428 	if (fh_len < 3)
4429 		return NULL;
4430 
4431 	inum = fid->raw[2];
4432 	inum = (inum << 32) | fid->raw[1];
4433 
4434 	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
4435 			shmem_match, fid->raw);
4436 	if (inode) {
4437 		dentry = shmem_find_alias(inode);
4438 		iput(inode);
4439 	}
4440 
4441 	return dentry;
4442 }
4443 
4444 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
4445 				struct inode *parent)
4446 {
4447 	if (*len < 3) {
4448 		*len = 3;
4449 		return FILEID_INVALID;
4450 	}
4451 
4452 	if (inode_unhashed(inode)) {
4453 		/* Unfortunately insert_inode_hash is not idempotent,
4454 		 * so as we hash inodes here rather than at creation
4455 		 * time, we need a lock to ensure we only try
4456 		 * to do it once
4457 		 */
4458 		static DEFINE_SPINLOCK(lock);
4459 		spin_lock(&lock);
4460 		if (inode_unhashed(inode))
4461 			__insert_inode_hash(inode,
4462 					    inode->i_ino + inode->i_generation);
4463 		spin_unlock(&lock);
4464 	}
4465 
4466 	fh[0] = inode->i_generation;
4467 	fh[1] = inode->i_ino;
4468 	fh[2] = ((__u64)inode->i_ino) >> 32;
4469 
4470 	*len = 3;
4471 	return 1;
4472 }
4473 
4474 static const struct export_operations shmem_export_ops = {
4475 	.get_parent     = shmem_get_parent,
4476 	.encode_fh      = shmem_encode_fh,
4477 	.fh_to_dentry	= shmem_fh_to_dentry,
4478 };
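/*
 * Sketch of the resulting file handle layout (not part of the kernel source):
 * shmem_encode_fh() stores the generation in word 0 and the 64-bit inode
 * number split across words 1 and 2, which is what name_to_handle_at(2)
 * hands back to userspace and what shmem_fh_to_dentry() reassembles:
 *
 *	fh[0] = i_generation;
 *	fh[1] = i_ino & 0xffffffff;	// low 32 bits
 *	fh[2] = i_ino >> 32;		// high 32 bits
 *
 * This is why NFS export and open_by_handle_at(2) work on tmpfs even though
 * inodes are only hashed lazily, at encode time.
 */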
4479 
4480 enum shmem_param {
4481 	Opt_gid,
4482 	Opt_huge,
4483 	Opt_mode,
4484 	Opt_mpol,
4485 	Opt_nr_blocks,
4486 	Opt_nr_inodes,
4487 	Opt_size,
4488 	Opt_uid,
4489 	Opt_inode32,
4490 	Opt_inode64,
4491 	Opt_noswap,
4492 	Opt_quota,
4493 	Opt_usrquota,
4494 	Opt_grpquota,
4495 	Opt_usrquota_block_hardlimit,
4496 	Opt_usrquota_inode_hardlimit,
4497 	Opt_grpquota_block_hardlimit,
4498 	Opt_grpquota_inode_hardlimit,
4499 	Opt_casefold_version,
4500 	Opt_casefold,
4501 	Opt_strict_encoding,
4502 };
4503 
4504 static const struct constant_table shmem_param_enums_huge[] = {
4505 	{"never",	SHMEM_HUGE_NEVER },
4506 	{"always",	SHMEM_HUGE_ALWAYS },
4507 	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
4508 	{"advise",	SHMEM_HUGE_ADVISE },
4509 	{}
4510 };
4511 
4512 const struct fs_parameter_spec shmem_fs_parameters[] = {
4513 	fsparam_gid   ("gid",		Opt_gid),
4514 	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
4515 	fsparam_u32oct("mode",		Opt_mode),
4516 	fsparam_string("mpol",		Opt_mpol),
4517 	fsparam_string("nr_blocks",	Opt_nr_blocks),
4518 	fsparam_string("nr_inodes",	Opt_nr_inodes),
4519 	fsparam_string("size",		Opt_size),
4520 	fsparam_uid   ("uid",		Opt_uid),
4521 	fsparam_flag  ("inode32",	Opt_inode32),
4522 	fsparam_flag  ("inode64",	Opt_inode64),
4523 	fsparam_flag  ("noswap",	Opt_noswap),
4524 #ifdef CONFIG_TMPFS_QUOTA
4525 	fsparam_flag  ("quota",		Opt_quota),
4526 	fsparam_flag  ("usrquota",	Opt_usrquota),
4527 	fsparam_flag  ("grpquota",	Opt_grpquota),
4528 	fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
4529 	fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
4530 	fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
4531 	fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
4532 #endif
4533 	fsparam_string("casefold",	Opt_casefold_version),
4534 	fsparam_flag  ("casefold",	Opt_casefold),
4535 	fsparam_flag  ("strict_encoding", Opt_strict_encoding),
4536 	{}
4537 };
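/*
 * All of the parameters above can be given at mount time, e.g. (illustrative
 * command, not part of the kernel source; /mnt/scratch is a hypothetical
 * mount point):
 *
 *	mount -t tmpfs -o size=1g,nr_inodes=10k,mode=1777,huge=within_size tmpfs /mnt/scratch
 *
 * "size" accepts k/m/g suffixes or a percentage of RAM, and "nr_blocks" /
 * "nr_inodes" are parsed with the same memparse() suffixes.
 */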
4538 
4539 #if IS_ENABLED(CONFIG_UNICODE)
4540 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4541 				    bool latest_version)
4542 {
4543 	struct shmem_options *ctx = fc->fs_private;
4544 	int version = UTF8_LATEST;
4545 	struct unicode_map *encoding;
4546 	char *version_str = param->string + 5;
4547 
4548 	if (!latest_version) {
4549 		if (strncmp(param->string, "utf8-", 5))
4550 			return invalfc(fc, "Only UTF-8 encodings are supported "
4551 				       "in the format: utf8-<version number>");
4552 
4553 		version = utf8_parse_version(version_str);
4554 		if (version < 0)
4555 			return invalfc(fc, "Invalid UTF-8 version: %s", version_str);
4556 	}
4557 
4558 	encoding = utf8_load(version);
4559 
4560 	if (IS_ERR(encoding)) {
4561 		return invalfc(fc, "Failed loading UTF-8 version: utf8-%u.%u.%u\n",
4562 			       unicode_major(version), unicode_minor(version),
4563 			       unicode_rev(version));
4564 	}
4565 
4566 	pr_info("tmpfs: Using encoding : utf8-%u.%u.%u\n",
4567 		unicode_major(version), unicode_minor(version), unicode_rev(version));
4568 
4569 	ctx->encoding = encoding;
4570 
4571 	return 0;
4572 }
4573 #else
4574 static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *param,
4575 				    bool latest_version)
4576 {
4577 	return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4578 }
4579 #endif
4580 
4581 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
4582 {
4583 	struct shmem_options *ctx = fc->fs_private;
4584 	struct fs_parse_result result;
4585 	unsigned long long size;
4586 	char *rest;
4587 	int opt;
4588 	kuid_t kuid;
4589 	kgid_t kgid;
4590 
4591 	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
4592 	if (opt < 0)
4593 		return opt;
4594 
4595 	switch (opt) {
4596 	case Opt_size:
4597 		size = memparse(param->string, &rest);
4598 		if (*rest == '%') {
4599 			size <<= PAGE_SHIFT;
4600 			size *= totalram_pages();
4601 			do_div(size, 100);
4602 			rest++;
4603 		}
4604 		if (*rest)
4605 			goto bad_value;
4606 		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
4607 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4608 		break;
4609 	case Opt_nr_blocks:
4610 		ctx->blocks = memparse(param->string, &rest);
4611 		if (*rest || ctx->blocks > LONG_MAX)
4612 			goto bad_value;
4613 		ctx->seen |= SHMEM_SEEN_BLOCKS;
4614 		break;
4615 	case Opt_nr_inodes:
4616 		ctx->inodes = memparse(param->string, &rest);
4617 		if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
4618 			goto bad_value;
4619 		ctx->seen |= SHMEM_SEEN_INODES;
4620 		break;
4621 	case Opt_mode:
4622 		ctx->mode = result.uint_32 & 07777;
4623 		break;
4624 	case Opt_uid:
4625 		kuid = result.uid;
4626 
4627 		/*
4628 		 * The requested uid must be representable in the
4629 		 * filesystem's idmapping.
4630 		 */
4631 		if (!kuid_has_mapping(fc->user_ns, kuid))
4632 			goto bad_value;
4633 
4634 		ctx->uid = kuid;
4635 		break;
4636 	case Opt_gid:
4637 		kgid = result.gid;
4638 
4639 		/*
4640 		 * The requested gid must be representable in the
4641 		 * filesystem's idmapping.
4642 		 */
4643 		if (!kgid_has_mapping(fc->user_ns, kgid))
4644 			goto bad_value;
4645 
4646 		ctx->gid = kgid;
4647 		break;
4648 	case Opt_huge:
4649 		ctx->huge = result.uint_32;
4650 		if (ctx->huge != SHMEM_HUGE_NEVER &&
4651 		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4652 		      has_transparent_hugepage()))
4653 			goto unsupported_parameter;
4654 		ctx->seen |= SHMEM_SEEN_HUGE;
4655 		break;
4656 	case Opt_mpol:
4657 		if (IS_ENABLED(CONFIG_NUMA)) {
4658 			mpol_put(ctx->mpol);
4659 			ctx->mpol = NULL;
4660 			if (mpol_parse_str(param->string, &ctx->mpol))
4661 				goto bad_value;
4662 			break;
4663 		}
4664 		goto unsupported_parameter;
4665 	case Opt_inode32:
4666 		ctx->full_inums = false;
4667 		ctx->seen |= SHMEM_SEEN_INUMS;
4668 		break;
4669 	case Opt_inode64:
4670 		if (sizeof(ino_t) < 8) {
4671 			return invalfc(fc,
4672 				       "Cannot use inode64 with <64bit inums in kernel\n");
4673 		}
4674 		ctx->full_inums = true;
4675 		ctx->seen |= SHMEM_SEEN_INUMS;
4676 		break;
4677 	case Opt_noswap:
4678 		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
4679 			return invalfc(fc,
4680 				       "Turning off swap in unprivileged tmpfs mounts unsupported");
4681 		}
4682 		ctx->noswap = true;
4683 		ctx->seen |= SHMEM_SEEN_NOSWAP;
4684 		break;
4685 	case Opt_quota:
4686 		if (fc->user_ns != &init_user_ns)
4687 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4688 		ctx->seen |= SHMEM_SEEN_QUOTA;
4689 		ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
4690 		break;
4691 	case Opt_usrquota:
4692 		if (fc->user_ns != &init_user_ns)
4693 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4694 		ctx->seen |= SHMEM_SEEN_QUOTA;
4695 		ctx->quota_types |= QTYPE_MASK_USR;
4696 		break;
4697 	case Opt_grpquota:
4698 		if (fc->user_ns != &init_user_ns)
4699 			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
4700 		ctx->seen |= SHMEM_SEEN_QUOTA;
4701 		ctx->quota_types |= QTYPE_MASK_GRP;
4702 		break;
4703 	case Opt_usrquota_block_hardlimit:
4704 		size = memparse(param->string, &rest);
4705 		if (*rest || !size)
4706 			goto bad_value;
4707 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4708 			return invalfc(fc,
4709 				       "User quota block hardlimit too large.");
4710 		ctx->qlimits.usrquota_bhardlimit = size;
4711 		break;
4712 	case Opt_grpquota_block_hardlimit:
4713 		size = memparse(param->string, &rest);
4714 		if (*rest || !size)
4715 			goto bad_value;
4716 		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
4717 			return invalfc(fc,
4718 				       "Group quota block hardlimit too large.");
4719 		ctx->qlimits.grpquota_bhardlimit = size;
4720 		break;
4721 	case Opt_usrquota_inode_hardlimit:
4722 		size = memparse(param->string, &rest);
4723 		if (*rest || !size)
4724 			goto bad_value;
4725 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4726 			return invalfc(fc,
4727 				       "User quota inode hardlimit too large.");
4728 		ctx->qlimits.usrquota_ihardlimit = size;
4729 		break;
4730 	case Opt_grpquota_inode_hardlimit:
4731 		size = memparse(param->string, &rest);
4732 		if (*rest || !size)
4733 			goto bad_value;
4734 		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
4735 			return invalfc(fc,
4736 				       "Group quota inode hardlimit too large.");
4737 		ctx->qlimits.grpquota_ihardlimit = size;
4738 		break;
4739 	case Opt_casefold_version:
4740 		return shmem_parse_opt_casefold(fc, param, false);
4741 	case Opt_casefold:
4742 		return shmem_parse_opt_casefold(fc, param, true);
4743 	case Opt_strict_encoding:
4744 #if IS_ENABLED(CONFIG_UNICODE)
4745 		ctx->strict_encoding = true;
4746 		break;
4747 #else
4748 		return invalfc(fc, "tmpfs: Kernel not built with CONFIG_UNICODE\n");
4749 #endif
4750 	}
4751 	return 0;
4752 
4753 unsupported_parameter:
4754 	return invalfc(fc, "Unsupported parameter '%s'", param->key);
4755 bad_value:
4756 	return invalfc(fc, "Bad value for '%s'", param->key);
4757 }
4758 
4759 static char *shmem_next_opt(char **s)
4760 {
4761 	char *sbegin = *s;
4762 	char *p;
4763 
4764 	if (sbegin == NULL)
4765 		return NULL;
4766 
4767 	/*
4768 	 * NUL-terminate this option: unfortunately,
4769 	 * mount options form a comma-separated list,
4770 	 * but mpol's nodelist may also contain commas.
4771 	 */
4772 	for (;;) {
4773 		p = strchr(*s, ',');
4774 		if (p == NULL)
4775 			break;
4776 		*s = p + 1;
4777 		if (!isdigit(*(p+1))) {
4778 			*p = '\0';
4779 			return sbegin;
4780 		}
4781 	}
4782 
4783 	*s = NULL;
4784 	return sbegin;
4785 }
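/*
 * Example of the splitting rule above (not part of the kernel source): a
 * comma followed by a digit is assumed to belong to an mpol nodelist, so the
 * option string
 *
 *	"size=50%,mpol=bind:0,2,huge=always"
 *
 * is returned as the three options "size=50%", "mpol=bind:0,2" and
 * "huge=always" by successive calls.
 */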
4786 
4787 static int shmem_parse_monolithic(struct fs_context *fc, void *data)
4788 {
4789 	return vfs_parse_monolithic_sep(fc, data, shmem_next_opt);
4790 }
4791 
4792 /*
4793  * Reconfigure a shmem filesystem.
4794  */
4795 static int shmem_reconfigure(struct fs_context *fc)
4796 {
4797 	struct shmem_options *ctx = fc->fs_private;
4798 	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
4799 	unsigned long used_isp;
4800 	struct mempolicy *mpol = NULL;
4801 	const char *err;
4802 
4803 	raw_spin_lock(&sbinfo->stat_lock);
4804 	used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
4805 
4806 	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
4807 		if (!sbinfo->max_blocks) {
4808 			err = "Cannot retroactively limit size";
4809 			goto out;
4810 		}
4811 		if (percpu_counter_compare(&sbinfo->used_blocks,
4812 					   ctx->blocks) > 0) {
4813 			err = "Too small a size for current use";
4814 			goto out;
4815 		}
4816 	}
4817 	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
4818 		if (!sbinfo->max_inodes) {
4819 			err = "Cannot retroactively limit inodes";
4820 			goto out;
4821 		}
4822 		if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
4823 			err = "Too few inodes for current use";
4824 			goto out;
4825 		}
4826 	}
4827 
4828 	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
4829 	    sbinfo->next_ino > UINT_MAX) {
4830 		err = "Current inum too high to switch to 32-bit inums";
4831 		goto out;
4832 	}
4833 	if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
4834 		err = "Cannot disable swap on remount";
4835 		goto out;
4836 	}
4837 	if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
4838 		err = "Cannot enable swap on remount if it was disabled on first mount";
4839 		goto out;
4840 	}
4841 
4842 	if (ctx->seen & SHMEM_SEEN_QUOTA &&
4843 	    !sb_any_quota_loaded(fc->root->d_sb)) {
4844 		err = "Cannot enable quota on remount";
4845 		goto out;
4846 	}
4847 
4848 #ifdef CONFIG_TMPFS_QUOTA
4849 #define CHANGED_LIMIT(name)						\
4850 	(ctx->qlimits.name## hardlimit &&				\
4851 	(ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
4852 
4853 	if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
4854 	    CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
4855 		err = "Cannot change global quota limit on remount";
4856 		goto out;
4857 	}
4858 #endif /* CONFIG_TMPFS_QUOTA */
4859 
4860 	if (ctx->seen & SHMEM_SEEN_HUGE)
4861 		sbinfo->huge = ctx->huge;
4862 	if (ctx->seen & SHMEM_SEEN_INUMS)
4863 		sbinfo->full_inums = ctx->full_inums;
4864 	if (ctx->seen & SHMEM_SEEN_BLOCKS)
4865 		sbinfo->max_blocks  = ctx->blocks;
4866 	if (ctx->seen & SHMEM_SEEN_INODES) {
4867 		sbinfo->max_inodes  = ctx->inodes;
4868 		sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
4869 	}
4870 
4871 	/*
4872 	 * Preserve previous mempolicy unless mpol remount option was specified.
4873 	 */
4874 	if (ctx->mpol) {
4875 		mpol = sbinfo->mpol;
4876 		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
4877 		ctx->mpol = NULL;
4878 	}
4879 
4880 	if (ctx->noswap)
4881 		sbinfo->noswap = true;
4882 
4883 	raw_spin_unlock(&sbinfo->stat_lock);
4884 	mpol_put(mpol);
4885 	return 0;
4886 out:
4887 	raw_spin_unlock(&sbinfo->stat_lock);
4888 	return invalfc(fc, "%s", err);
4889 }
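/*
 * Remount sketch (not part of the kernel source; /mnt/scratch is a
 * hypothetical mount point): limits may be raised or lowered on a live mount
 * as long as the new value still covers current usage, but a mount created
 * without a limit cannot be given one later ("Cannot retroactively limit
 * size" above):
 *
 *	mount -o remount,size=2g /mnt/scratch
 */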
4890 
4891 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
4892 {
4893 	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
4894 	struct mempolicy *mpol;
4895 
4896 	if (sbinfo->max_blocks != shmem_default_max_blocks())
4897 		seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks));
4898 	if (sbinfo->max_inodes != shmem_default_max_inodes())
4899 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
4900 	if (sbinfo->mode != (0777 | S_ISVTX))
4901 		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
4902 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
4903 		seq_printf(seq, ",uid=%u",
4904 				from_kuid_munged(&init_user_ns, sbinfo->uid));
4905 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
4906 		seq_printf(seq, ",gid=%u",
4907 				from_kgid_munged(&init_user_ns, sbinfo->gid));
4908 
4909 	/*
4910 	 * Showing inode{64,32} might be useful even if it's the system default,
4911 	 * since then people don't have to resort to checking both here and
4912 	 * /proc/config.gz to confirm 64-bit inums were successfully applied
4913 	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
4914 	 *
4915 	 * We hide it when inode64 isn't the default and we are using 32-bit
4916 	 * inodes, since that probably just means the feature isn't even under
4917 	 * consideration.
4918 	 *
4919 	 * As such:
4920 	 *
4921 	 *                     +-----------------+-----------------+
4922 	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
4923 	 *  +------------------+-----------------+-----------------+
4924 	 *  | full_inums=true  | show            | show            |
4925 	 *  | full_inums=false | show            | hide            |
4926 	 *  +------------------+-----------------+-----------------+
4927 	 *
4928 	 */
4929 	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
4930 		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
4931 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4932 	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
4933 	if (sbinfo->huge)
4934 		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
4935 #endif
4936 	mpol = shmem_get_sbmpol(sbinfo);
4937 	shmem_show_mpol(seq, mpol);
4938 	mpol_put(mpol);
4939 	if (sbinfo->noswap)
4940 		seq_printf(seq, ",noswap");
4941 #ifdef CONFIG_TMPFS_QUOTA
4942 	if (sb_has_quota_active(root->d_sb, USRQUOTA))
4943 		seq_printf(seq, ",usrquota");
4944 	if (sb_has_quota_active(root->d_sb, GRPQUOTA))
4945 		seq_printf(seq, ",grpquota");
4946 	if (sbinfo->qlimits.usrquota_bhardlimit)
4947 		seq_printf(seq, ",usrquota_block_hardlimit=%lld",
4948 			   sbinfo->qlimits.usrquota_bhardlimit);
4949 	if (sbinfo->qlimits.grpquota_bhardlimit)
4950 		seq_printf(seq, ",grpquota_block_hardlimit=%lld",
4951 			   sbinfo->qlimits.grpquota_bhardlimit);
4952 	if (sbinfo->qlimits.usrquota_ihardlimit)
4953 		seq_printf(seq, ",usrquota_inode_hardlimit=%lld",
4954 			   sbinfo->qlimits.usrquota_ihardlimit);
4955 	if (sbinfo->qlimits.grpquota_ihardlimit)
4956 		seq_printf(seq, ",grpquota_inode_hardlimit=%lld",
4957 			   sbinfo->qlimits.grpquota_ihardlimit);
4958 #endif
4959 	return 0;
4960 }
4961 
4962 #endif /* CONFIG_TMPFS */
4963 
4964 static void shmem_put_super(struct super_block *sb)
4965 {
4966 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
4967 
4968 #if IS_ENABLED(CONFIG_UNICODE)
4969 	if (sb->s_encoding)
4970 		utf8_unload(sb->s_encoding);
4971 #endif
4972 
4973 #ifdef CONFIG_TMPFS_QUOTA
4974 	shmem_disable_quotas(sb);
4975 #endif
4976 	free_percpu(sbinfo->ino_batch);
4977 	percpu_counter_destroy(&sbinfo->used_blocks);
4978 	mpol_put(sbinfo->mpol);
4979 	kfree(sbinfo);
4980 	sb->s_fs_info = NULL;
4981 }
4982 
4983 #if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_TMPFS)
4984 static const struct dentry_operations shmem_ci_dentry_ops = {
4985 	.d_hash = generic_ci_d_hash,
4986 	.d_compare = generic_ci_d_compare,
4987 };
4988 #endif
4989 
4990 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
4991 {
4992 	struct shmem_options *ctx = fc->fs_private;
4993 	struct inode *inode;
4994 	struct shmem_sb_info *sbinfo;
4995 	int error = -ENOMEM;
4996 
4997 	/* Round up to L1_CACHE_BYTES to resist false sharing */
4998 	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
4999 				L1_CACHE_BYTES), GFP_KERNEL);
5000 	if (!sbinfo)
5001 		return error;
5002 
5003 	sb->s_fs_info = sbinfo;
5004 
5005 #ifdef CONFIG_TMPFS
5006 	/*
5007 	 * Per default we only allow half of the physical ram per
5008 	 * tmpfs instance, limiting inodes to one per page of lowmem;
5009 	 * but the internal instance is left unlimited.
5010 	 */
5011 	if (!(sb->s_flags & SB_KERNMOUNT)) {
5012 		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
5013 			ctx->blocks = shmem_default_max_blocks();
5014 		if (!(ctx->seen & SHMEM_SEEN_INODES))
5015 			ctx->inodes = shmem_default_max_inodes();
5016 		if (!(ctx->seen & SHMEM_SEEN_INUMS))
5017 			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
5018 		sbinfo->noswap = ctx->noswap;
5019 	} else {
5020 		sb->s_flags |= SB_NOUSER;
5021 	}
5022 	sb->s_export_op = &shmem_export_ops;
5023 	sb->s_flags |= SB_NOSEC;
5024 
5025 #if IS_ENABLED(CONFIG_UNICODE)
5026 	if (!ctx->encoding && ctx->strict_encoding) {
5027 		pr_err("tmpfs: strict_encoding option without encoding is forbidden\n");
5028 		error = -EINVAL;
5029 		goto failed;
5030 	}
5031 
5032 	if (ctx->encoding) {
5033 		sb->s_encoding = ctx->encoding;
5034 		set_default_d_op(sb, &shmem_ci_dentry_ops);
5035 		if (ctx->strict_encoding)
5036 			sb->s_encoding_flags = SB_ENC_STRICT_MODE_FL;
5037 	}
5038 #endif
5039 
5040 #else
5041 	sb->s_flags |= SB_NOUSER;
5042 #endif /* CONFIG_TMPFS */
5043 	sb->s_d_flags |= DCACHE_DONTCACHE;
5044 	sbinfo->max_blocks = ctx->blocks;
5045 	sbinfo->max_inodes = ctx->inodes;
5046 	sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
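	/* Kernel-internal mounts hand out inode numbers from per-CPU batches */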
5047 	if (sb->s_flags & SB_KERNMOUNT) {
5048 		sbinfo->ino_batch = alloc_percpu(ino_t);
5049 		if (!sbinfo->ino_batch)
5050 			goto failed;
5051 	}
5052 	sbinfo->uid = ctx->uid;
5053 	sbinfo->gid = ctx->gid;
5054 	sbinfo->full_inums = ctx->full_inums;
5055 	sbinfo->mode = ctx->mode;
5056 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5057 	if (ctx->seen & SHMEM_SEEN_HUGE)
5058 		sbinfo->huge = ctx->huge;
5059 	else
5060 		sbinfo->huge = tmpfs_huge;
5061 #endif
5062 	sbinfo->mpol = ctx->mpol;
5063 	ctx->mpol = NULL;
5064 
5065 	raw_spin_lock_init(&sbinfo->stat_lock);
5066 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
5067 		goto failed;
5068 	spin_lock_init(&sbinfo->shrinklist_lock);
5069 	INIT_LIST_HEAD(&sbinfo->shrinklist);
5070 
5071 	sb->s_maxbytes = MAX_LFS_FILESIZE;
5072 	sb->s_blocksize = PAGE_SIZE;
5073 	sb->s_blocksize_bits = PAGE_SHIFT;
5074 	sb->s_magic = TMPFS_MAGIC;
5075 	sb->s_op = &shmem_ops;
5076 	sb->s_time_gran = 1;
5077 #ifdef CONFIG_TMPFS_XATTR
5078 	sb->s_xattr = shmem_xattr_handlers;
5079 #endif
5080 #ifdef CONFIG_TMPFS_POSIX_ACL
5081 	sb->s_flags |= SB_POSIXACL;
5082 #endif
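	/* Give each superblock instance a freshly generated random UUID */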
5083 	uuid_t uuid;
5084 	uuid_gen(&uuid);
5085 	super_set_uuid(sb, uuid.b, sizeof(uuid));
5086 
5087 #ifdef CONFIG_TMPFS_QUOTA
5088 	if (ctx->seen & SHMEM_SEEN_QUOTA) {
5089 		sb->dq_op = &shmem_quota_operations;
5090 		sb->s_qcop = &dquot_quotactl_sysfile_ops;
5091 		sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
5092 
5093 		/* Copy the default limits from ctx into sbinfo */
5094 		memcpy(&sbinfo->qlimits, &ctx->qlimits,
5095 		       sizeof(struct shmem_quota_limits));
5096 
5097 		if (shmem_enable_quotas(sb, ctx->quota_types))
5098 			goto failed;
5099 	}
5100 #endif /* CONFIG_TMPFS_QUOTA */
5101 
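	/* Allocate the root directory inode and make it the root dentry */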
5102 	inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
5103 				S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
5104 	if (IS_ERR(inode)) {
5105 		error = PTR_ERR(inode);
5106 		goto failed;
5107 	}
5108 	inode->i_uid = sbinfo->uid;
5109 	inode->i_gid = sbinfo->gid;
5110 	sb->s_root = d_make_root(inode);
5111 	if (!sb->s_root)
5112 		goto failed;
5113 	return 0;
5114 
5115 failed:
5116 	shmem_put_super(sb);
5117 	return error;
5118 }
5119 
5120 static int shmem_get_tree(struct fs_context *fc)
5121 {
5122 	return get_tree_nodev(fc, shmem_fill_super);
5123 }
5124 
5125 static void shmem_free_fc(struct fs_context *fc)
5126 {
5127 	struct shmem_options *ctx = fc->fs_private;
5128 
5129 	if (ctx) {
5130 		mpol_put(ctx->mpol);
5131 		kfree(ctx);
5132 	}
5133 }
5134 
5135 static const struct fs_context_operations shmem_fs_context_ops = {
5136 	.free			= shmem_free_fc,
5137 	.get_tree		= shmem_get_tree,
5138 #ifdef CONFIG_TMPFS
5139 	.parse_monolithic	= shmem_parse_monolithic,
5140 	.parse_param		= shmem_parse_one,
5141 	.reconfigure		= shmem_reconfigure,
5142 #endif
5143 };
5144 
5145 static struct kmem_cache *shmem_inode_cachep __ro_after_init;
5146 
5147 static struct inode *shmem_alloc_inode(struct super_block *sb)
5148 {
5149 	struct shmem_inode_info *info;
5150 	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
5151 	if (!info)
5152 		return NULL;
5153 	return &info->vfs_inode;
5154 }
5155 
5156 static void shmem_free_in_core_inode(struct inode *inode)
5157 {
5158 	if (S_ISLNK(inode->i_mode))
5159 		kfree(inode->i_link);
5160 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
5161 }
5162 
5163 static void shmem_destroy_inode(struct inode *inode)
5164 {
5165 	if (S_ISREG(inode->i_mode))
5166 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
5167 	if (S_ISDIR(inode->i_mode))
5168 		simple_offset_destroy(shmem_get_offset_ctx(inode));
5169 }
5170 
5171 static void shmem_init_inode(void *foo)
5172 {
5173 	struct shmem_inode_info *info = foo;
5174 	inode_init_once(&info->vfs_inode);
5175 }
5176 
5177 static void __init shmem_init_inodecache(void)
5178 {
5179 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
5180 				sizeof(struct shmem_inode_info),
5181 				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
5182 }
5183 
5184 static void __init shmem_destroy_inodecache(void)
5185 {
5186 	kmem_cache_destroy(shmem_inode_cachep);
5187 }
5188 
5189 /* Keep the page in page cache instead of truncating it */
5190 static int shmem_error_remove_folio(struct address_space *mapping,
5191 				   struct folio *folio)
5192 {
5193 	return 0;
5194 }
5195 
5196 static const struct address_space_operations shmem_aops = {
5197 	.dirty_folio	= noop_dirty_folio,
5198 #ifdef CONFIG_TMPFS
5199 	.write_begin	= shmem_write_begin,
5200 	.write_end	= shmem_write_end,
5201 #endif
5202 #ifdef CONFIG_MIGRATION
5203 	.migrate_folio	= migrate_folio,
5204 #endif
5205 	.error_remove_folio = shmem_error_remove_folio,
5206 };
5207 
5208 static const struct file_operations shmem_file_operations = {
5209 	.mmap		= shmem_mmap,
5210 	.open		= shmem_file_open,
5211 	.get_unmapped_area = shmem_get_unmapped_area,
5212 #ifdef CONFIG_TMPFS
5213 	.llseek		= shmem_file_llseek,
5214 	.read_iter	= shmem_file_read_iter,
5215 	.write_iter	= shmem_file_write_iter,
5216 	.fsync		= noop_fsync,
5217 	.splice_read	= shmem_file_splice_read,
5218 	.splice_write	= iter_file_splice_write,
5219 	.fallocate	= shmem_fallocate,
5220 #endif
5221 };
5222 
5223 static const struct inode_operations shmem_inode_operations = {
5224 	.getattr	= shmem_getattr,
5225 	.setattr	= shmem_setattr,
5226 #ifdef CONFIG_TMPFS_XATTR
5227 	.listxattr	= shmem_listxattr,
5228 	.set_acl	= simple_set_acl,
5229 	.fileattr_get	= shmem_fileattr_get,
5230 	.fileattr_set	= shmem_fileattr_set,
5231 #endif
5232 };
5233 
5234 static const struct inode_operations shmem_dir_inode_operations = {
5235 #ifdef CONFIG_TMPFS
5236 	.getattr	= shmem_getattr,
5237 	.create		= shmem_create,
5238 	.lookup		= simple_lookup,
5239 	.link		= shmem_link,
5240 	.unlink		= shmem_unlink,
5241 	.symlink	= shmem_symlink,
5242 	.mkdir		= shmem_mkdir,
5243 	.rmdir		= shmem_rmdir,
5244 	.mknod		= shmem_mknod,
5245 	.rename		= shmem_rename2,
5246 	.tmpfile	= shmem_tmpfile,
5247 	.get_offset_ctx	= shmem_get_offset_ctx,
5248 #endif
5249 #ifdef CONFIG_TMPFS_XATTR
5250 	.listxattr	= shmem_listxattr,
5251 	.fileattr_get	= shmem_fileattr_get,
5252 	.fileattr_set	= shmem_fileattr_set,
5253 #endif
5254 #ifdef CONFIG_TMPFS_POSIX_ACL
5255 	.setattr	= shmem_setattr,
5256 	.set_acl	= simple_set_acl,
5257 #endif
5258 };
5259 
5260 static const struct inode_operations shmem_special_inode_operations = {
5261 	.getattr	= shmem_getattr,
5262 #ifdef CONFIG_TMPFS_XATTR
5263 	.listxattr	= shmem_listxattr,
5264 #endif
5265 #ifdef CONFIG_TMPFS_POSIX_ACL
5266 	.setattr	= shmem_setattr,
5267 	.set_acl	= simple_set_acl,
5268 #endif
5269 };
5270 
5271 static const struct super_operations shmem_ops = {
5272 	.alloc_inode	= shmem_alloc_inode,
5273 	.free_inode	= shmem_free_in_core_inode,
5274 	.destroy_inode	= shmem_destroy_inode,
5275 #ifdef CONFIG_TMPFS
5276 	.statfs		= shmem_statfs,
5277 	.show_options	= shmem_show_options,
5278 #endif
5279 #ifdef CONFIG_TMPFS_QUOTA
5280 	.get_dquots	= shmem_get_dquots,
5281 #endif
5282 	.evict_inode	= shmem_evict_inode,
5283 	.drop_inode	= inode_just_drop,
5284 	.put_super	= shmem_put_super,
5285 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5286 	.nr_cached_objects	= shmem_unused_huge_count,
5287 	.free_cached_objects	= shmem_unused_huge_scan,
5288 #endif
5289 };
5290 
5291 static const struct vm_operations_struct shmem_vm_ops = {
5292 	.fault		= shmem_fault,
5293 	.map_pages	= filemap_map_pages,
5294 #ifdef CONFIG_NUMA
5295 	.set_policy     = shmem_set_policy,
5296 	.get_policy     = shmem_get_policy,
5297 #endif
5298 };
5299 
5300 static const struct vm_operations_struct shmem_anon_vm_ops = {
5301 	.fault		= shmem_fault,
5302 	.map_pages	= filemap_map_pages,
5303 #ifdef CONFIG_NUMA
5304 	.set_policy     = shmem_set_policy,
5305 	.get_policy     = shmem_get_policy,
5306 #endif
5307 };
5308 
5309 int shmem_init_fs_context(struct fs_context *fc)
5310 {
5311 	struct shmem_options *ctx;
5312 
5313 	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
5314 	if (!ctx)
5315 		return -ENOMEM;
5316 
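	/* Default to mode 01777 (world-writable, sticky), owned by the mounting task */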
5317 	ctx->mode = 0777 | S_ISVTX;
5318 	ctx->uid = current_fsuid();
5319 	ctx->gid = current_fsgid();
5320 
5321 #if IS_ENABLED(CONFIG_UNICODE)
5322 	ctx->encoding = NULL;
5323 #endif
5324 
5325 	fc->fs_private = ctx;
5326 	fc->ops = &shmem_fs_context_ops;
5327 #ifdef CONFIG_TMPFS
5328 	fc->sb_flags |= SB_I_VERSION;
5329 #endif
5330 	return 0;
5331 }
5332 
5333 static struct file_system_type shmem_fs_type = {
5334 	.owner		= THIS_MODULE,
5335 	.name		= "tmpfs",
5336 	.init_fs_context = shmem_init_fs_context,
5337 #ifdef CONFIG_TMPFS
5338 	.parameters	= shmem_fs_parameters,
5339 #endif
5340 	.kill_sb	= kill_litter_super,
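	/* Mountable in user namespaces; supports idmapped mounts and multigrain timestamps */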
5341 	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
5342 };
5343 
5344 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5345 
5346 #define __INIT_KOBJ_ATTR(_name, _mode, _show, _store)			\
5347 {									\
5348 	.attr	= { .name = __stringify(_name), .mode = _mode },	\
5349 	.show	= _show,						\
5350 	.store	= _store,						\
5351 }
5352 
5353 #define TMPFS_ATTR_W(_name, _store)				\
5354 	static struct kobj_attribute tmpfs_attr_##_name =	\
5355 			__INIT_KOBJ_ATTR(_name, 0200, NULL, _store)
5356 
5357 #define TMPFS_ATTR_RW(_name, _show, _store)			\
5358 	static struct kobj_attribute tmpfs_attr_##_name =	\
5359 			__INIT_KOBJ_ATTR(_name, 0644, _show, _store)
5360 
5361 #define TMPFS_ATTR_RO(_name, _show)				\
5362 	static struct kobj_attribute tmpfs_attr_##_name =	\
5363 			__INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
5364 
5365 #if IS_ENABLED(CONFIG_UNICODE)
5366 static ssize_t casefold_show(struct kobject *kobj, struct kobj_attribute *a,
5367 			char *buf)
5368 {
5369 	return sysfs_emit(buf, "supported\n");
5370 }
5371 TMPFS_ATTR_RO(casefold, casefold_show);
5372 #endif
5373 
5374 static struct attribute *tmpfs_attributes[] = {
5375 #if IS_ENABLED(CONFIG_UNICODE)
5376 	&tmpfs_attr_casefold.attr,
5377 #endif
5378 	NULL
5379 };
5380 
5381 static const struct attribute_group tmpfs_attribute_group = {
5382 	.attrs = tmpfs_attributes,
5383 	.name = "features"
5384 };
5385 
5386 static struct kobject *tmpfs_kobj;
5387 
5388 static int __init tmpfs_sysfs_init(void)
5389 {
5390 	int ret;
5391 
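	/* Expose the tmpfs feature flags under /sys/fs/tmpfs/features */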
5392 	tmpfs_kobj = kobject_create_and_add("tmpfs", fs_kobj);
5393 	if (!tmpfs_kobj)
5394 		return -ENOMEM;
5395 
5396 	ret = sysfs_create_group(tmpfs_kobj, &tmpfs_attribute_group);
5397 	if (ret)
5398 		kobject_put(tmpfs_kobj);
5399 
5400 	return ret;
5401 }
5402 #endif /* CONFIG_SYSFS && CONFIG_TMPFS */
5403 
5404 void __init shmem_init(void)
5405 {
5406 	int error;
5407 
5408 	shmem_init_inodecache();
5409 
5410 #ifdef CONFIG_TMPFS_QUOTA
5411 	register_quota_format(&shmem_quota_format);
5412 #endif
5413 
5414 	error = register_filesystem(&shmem_fs_type);
5415 	if (error) {
5416 		pr_err("Could not register tmpfs\n");
5417 		goto out2;
5418 	}
5419 
5420 	shm_mnt = kern_mount(&shmem_fs_type);
5421 	if (IS_ERR(shm_mnt)) {
5422 		error = PTR_ERR(shm_mnt);
5423 		pr_err("Could not kern_mount tmpfs\n");
5424 		goto out1;
5425 	}
5426 
5427 #if defined(CONFIG_SYSFS) && defined(CONFIG_TMPFS)
5428 	error = tmpfs_sysfs_init();
5429 	if (error) {
5430 		pr_err("Could not init tmpfs sysfs\n");
5431 		goto out1;
5432 	}
5433 #endif
5434 
5435 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5436 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
5437 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5438 	else
5439 		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
5440 
5441 	/*
5442 	 * Default to setting PMD-sized THP to inherit the global setting and
5443 	 * disable all other multi-size THPs.
5444 	 */
5445 	if (!shmem_orders_configured)
5446 		huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
5447 #endif
5448 	return;
5449 
5450 out1:
5451 	unregister_filesystem(&shmem_fs_type);
5452 out2:
5453 #ifdef CONFIG_TMPFS_QUOTA
5454 	unregister_quota_format(&shmem_quota_format);
5455 #endif
5456 	shmem_destroy_inodecache();
5457 	shm_mnt = ERR_PTR(error);
5458 }
5459 
5460 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
5461 static ssize_t shmem_enabled_show(struct kobject *kobj,
5462 				  struct kobj_attribute *attr, char *buf)
5463 {
5464 	static const int values[] = {
5465 		SHMEM_HUGE_ALWAYS,
5466 		SHMEM_HUGE_WITHIN_SIZE,
5467 		SHMEM_HUGE_ADVISE,
5468 		SHMEM_HUGE_NEVER,
5469 		SHMEM_HUGE_DENY,
5470 		SHMEM_HUGE_FORCE,
5471 	};
5472 	int len = 0;
5473 	int i;
5474 
5475 	for (i = 0; i < ARRAY_SIZE(values); i++) {
5476 		len += sysfs_emit_at(buf, len,
5477 				shmem_huge == values[i] ? "%s[%s]" : "%s%s",
5478 				i ? " " : "", shmem_format_huge(values[i]));
5479 	}
5480 	len += sysfs_emit_at(buf, len, "\n");
5481 
5482 	return len;
5483 }
5484 
5485 static ssize_t shmem_enabled_store(struct kobject *kobj,
5486 		struct kobj_attribute *attr, const char *buf, size_t count)
5487 {
5488 	char tmp[16];
5489 	int huge, err;
5490 
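	/* Copy into a NUL-terminated scratch buffer and strip any trailing newline */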
5491 	if (count + 1 > sizeof(tmp))
5492 		return -EINVAL;
5493 	memcpy(tmp, buf, count);
5494 	tmp[count] = '\0';
5495 	if (count && tmp[count - 1] == '\n')
5496 		tmp[count - 1] = '\0';
5497 
5498 	huge = shmem_parse_huge(tmp);
5499 	if (huge == -EINVAL)
5500 		return huge;
5501 
5502 	shmem_huge = huge;
5503 	if (shmem_huge > SHMEM_HUGE_DENY)
5504 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
5505 
5506 	err = start_stop_khugepaged();
5507 	return err ? err : count;
5508 }
5509 
5510 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
5511 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
5512 
5513 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
5514 					  struct kobj_attribute *attr, char *buf)
5515 {
5516 	int order = to_thpsize(kobj)->order;
5517 	const char *output;
5518 
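	/* Bracket whichever per-size policy is currently selected */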
5519 	if (test_bit(order, &huge_shmem_orders_always))
5520 		output = "[always] inherit within_size advise never";
5521 	else if (test_bit(order, &huge_shmem_orders_inherit))
5522 		output = "always [inherit] within_size advise never";
5523 	else if (test_bit(order, &huge_shmem_orders_within_size))
5524 		output = "always inherit [within_size] advise never";
5525 	else if (test_bit(order, &huge_shmem_orders_madvise))
5526 		output = "always inherit within_size [advise] never";
5527 	else
5528 		output = "always inherit within_size advise [never]";
5529 
5530 	return sysfs_emit(buf, "%s\n", output);
5531 }
5532 
5533 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
5534 					   struct kobj_attribute *attr,
5535 					   const char *buf, size_t count)
5536 {
5537 	int order = to_thpsize(kobj)->order;
5538 	ssize_t ret = count;
5539 
5540 	if (sysfs_streq(buf, "always")) {
5541 		spin_lock(&huge_shmem_orders_lock);
5542 		clear_bit(order, &huge_shmem_orders_inherit);
5543 		clear_bit(order, &huge_shmem_orders_madvise);
5544 		clear_bit(order, &huge_shmem_orders_within_size);
5545 		set_bit(order, &huge_shmem_orders_always);
5546 		spin_unlock(&huge_shmem_orders_lock);
5547 	} else if (sysfs_streq(buf, "inherit")) {
5548 		/* Do not override huge allocation policy with non-PMD sized mTHP */
5549 		if (shmem_huge == SHMEM_HUGE_FORCE &&
5550 		    order != HPAGE_PMD_ORDER)
5551 			return -EINVAL;
5552 
5553 		spin_lock(&huge_shmem_orders_lock);
5554 		clear_bit(order, &huge_shmem_orders_always);
5555 		clear_bit(order, &huge_shmem_orders_madvise);
5556 		clear_bit(order, &huge_shmem_orders_within_size);
5557 		set_bit(order, &huge_shmem_orders_inherit);
5558 		spin_unlock(&huge_shmem_orders_lock);
5559 	} else if (sysfs_streq(buf, "within_size")) {
5560 		spin_lock(&huge_shmem_orders_lock);
5561 		clear_bit(order, &huge_shmem_orders_always);
5562 		clear_bit(order, &huge_shmem_orders_inherit);
5563 		clear_bit(order, &huge_shmem_orders_madvise);
5564 		set_bit(order, &huge_shmem_orders_within_size);
5565 		spin_unlock(&huge_shmem_orders_lock);
5566 	} else if (sysfs_streq(buf, "advise")) {
5567 		spin_lock(&huge_shmem_orders_lock);
5568 		clear_bit(order, &huge_shmem_orders_always);
5569 		clear_bit(order, &huge_shmem_orders_inherit);
5570 		clear_bit(order, &huge_shmem_orders_within_size);
5571 		set_bit(order, &huge_shmem_orders_madvise);
5572 		spin_unlock(&huge_shmem_orders_lock);
5573 	} else if (sysfs_streq(buf, "never")) {
5574 		spin_lock(&huge_shmem_orders_lock);
5575 		clear_bit(order, &huge_shmem_orders_always);
5576 		clear_bit(order, &huge_shmem_orders_inherit);
5577 		clear_bit(order, &huge_shmem_orders_within_size);
5578 		clear_bit(order, &huge_shmem_orders_madvise);
5579 		spin_unlock(&huge_shmem_orders_lock);
5580 	} else {
5581 		ret = -EINVAL;
5582 	}
5583 
5584 	if (ret > 0) {
5585 		int err = start_stop_khugepaged();
5586 
5587 		if (err)
5588 			ret = err;
5589 	}
5590 	return ret;
5591 }
5592 
5593 struct kobj_attribute thpsize_shmem_enabled_attr =
5594 	__ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
5595 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
5596 
5597 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
5598 
5599 static int __init setup_transparent_hugepage_shmem(char *str)
5600 {
5601 	int huge;
5602 
5603 	huge = shmem_parse_huge(str);
5604 	if (huge == -EINVAL) {
5605 		pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n");
5606 		return huge;
5607 	}
5608 
5609 	shmem_huge = huge;
5610 	return 1;
5611 }
5612 __setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem);
5613 
5614 static int __init setup_transparent_hugepage_tmpfs(char *str)
5615 {
5616 	int huge;
5617 
5618 	huge = shmem_parse_huge(str);
5619 	if (huge < 0) {
5620 		pr_warn("transparent_hugepage_tmpfs= cannot parse, ignored\n");
5621 		return huge;
5622 	}
5623 
5624 	tmpfs_huge = huge;
5625 	return 1;
5626 }
5627 __setup("transparent_hugepage_tmpfs=", setup_transparent_hugepage_tmpfs);
5628 
5629 static char str_dup[PAGE_SIZE] __initdata;
5630 static int __init setup_thp_shmem(char *str)
5631 {
5632 	char *token, *range, *policy, *subtoken;
5633 	unsigned long always, inherit, madvise, within_size;
5634 	char *start_size, *end_size;
5635 	int start, end, nr;
5636 	char *p;
5637 
5638 	if (!str || strlen(str) + 1 > PAGE_SIZE)
5639 		goto err;
5640 	strscpy(str_dup, str);
5641 
5642 	always = huge_shmem_orders_always;
5643 	inherit = huge_shmem_orders_inherit;
5644 	madvise = huge_shmem_orders_madvise;
5645 	within_size = huge_shmem_orders_within_size;
5646 	p = str_dup;
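	/*
	 * Each ';'-separated entry is parsed as "<sizes>:<policy>", where
	 * <sizes> is a ','-separated list of sizes or "<start>-<end>" ranges
	 * and <policy> is one of always, inherit, within_size, advise or never.
	 */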
5647 	while ((token = strsep(&p, ";")) != NULL) {
5648 		range = strsep(&token, ":");
5649 		policy = token;
5650 
5651 		if (!policy)
5652 			goto err;
5653 
5654 		while ((subtoken = strsep(&range, ",")) != NULL) {
5655 			if (strchr(subtoken, '-')) {
5656 				start_size = strsep(&subtoken, "-");
5657 				end_size = subtoken;
5658 
5659 				start = get_order_from_str(start_size,
5660 							   THP_ORDERS_ALL_FILE_DEFAULT);
5661 				end = get_order_from_str(end_size,
5662 							 THP_ORDERS_ALL_FILE_DEFAULT);
5663 			} else {
5664 				start_size = end_size = subtoken;
5665 				start = end = get_order_from_str(subtoken,
5666 								 THP_ORDERS_ALL_FILE_DEFAULT);
5667 			}
5668 
5669 			if (start < 0) {
5670 				pr_err("invalid size %s in thp_shmem boot parameter\n",
5671 				       start_size);
5672 				goto err;
5673 			}
5674 
5675 			if (end < 0) {
5676 				pr_err("invalid size %s in thp_shmem boot parameter\n",
5677 				       end_size);
5678 				goto err;
5679 			}
5680 
5681 			if (start > end)
5682 				goto err;
5683 
5684 			nr = end - start + 1;
5685 			if (!strcmp(policy, "always")) {
5686 				bitmap_set(&always, start, nr);
5687 				bitmap_clear(&inherit, start, nr);
5688 				bitmap_clear(&madvise, start, nr);
5689 				bitmap_clear(&within_size, start, nr);
5690 			} else if (!strcmp(policy, "advise")) {
5691 				bitmap_set(&madvise, start, nr);
5692 				bitmap_clear(&inherit, start, nr);
5693 				bitmap_clear(&always, start, nr);
5694 				bitmap_clear(&within_size, start, nr);
5695 			} else if (!strcmp(policy, "inherit")) {
5696 				bitmap_set(&inherit, start, nr);
5697 				bitmap_clear(&madvise, start, nr);
5698 				bitmap_clear(&always, start, nr);
5699 				bitmap_clear(&within_size, start, nr);
5700 			} else if (!strcmp(policy, "within_size")) {
5701 				bitmap_set(&within_size, start, nr);
5702 				bitmap_clear(&inherit, start, nr);
5703 				bitmap_clear(&madvise, start, nr);
5704 				bitmap_clear(&always, start, nr);
5705 			} else if (!strcmp(policy, "never")) {
5706 				bitmap_clear(&inherit, start, nr);
5707 				bitmap_clear(&madvise, start, nr);
5708 				bitmap_clear(&always, start, nr);
5709 				bitmap_clear(&within_size, start, nr);
5710 			} else {
5711 				pr_err("invalid policy %s in thp_shmem boot parameter\n", policy);
5712 				goto err;
5713 			}
5714 		}
5715 	}
5716 
5717 	huge_shmem_orders_always = always;
5718 	huge_shmem_orders_madvise = madvise;
5719 	huge_shmem_orders_inherit = inherit;
5720 	huge_shmem_orders_within_size = within_size;
5721 	shmem_orders_configured = true;
5722 	return 1;
5723 
5724 err:
5725 	pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str);
5726 	return 0;
5727 }
5728 __setup("thp_shmem=", setup_thp_shmem);
5729 
5730 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5731 
5732 #else /* !CONFIG_SHMEM */
5733 
5734 /*
5735  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
5736  *
5737  * This is intended for small systems where the benefits of the full
5738  * shmem code (swap-backed and resource-limited) are outweighed by
5739  * its complexity. On systems without swap this code should be
5740  * effectively equivalent, but much lighter weight.
5741  */
5742 
5743 static struct file_system_type shmem_fs_type = {
5744 	.name		= "tmpfs",
5745 	.init_fs_context = ramfs_init_fs_context,
5746 	.parameters	= ramfs_fs_parameters,
5747 	.kill_sb	= ramfs_kill_sb,
5748 	.fs_flags	= FS_USERNS_MOUNT,
5749 };
5750 
5751 void __init shmem_init(void)
5752 {
5753 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
5754 
5755 	shm_mnt = kern_mount(&shmem_fs_type);
5756 	BUG_ON(IS_ERR(shm_mnt));
5757 }
5758 
5759 int shmem_unuse(unsigned int type)
5760 {
5761 	return 0;
5762 }
5763 
5764 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
5765 {
5766 	return 0;
5767 }
5768 
5769 void shmem_unlock_mapping(struct address_space *mapping)
5770 {
5771 }
5772 
5773 #ifdef CONFIG_MMU
5774 unsigned long shmem_get_unmapped_area(struct file *file,
5775 				      unsigned long addr, unsigned long len,
5776 				      unsigned long pgoff, unsigned long flags)
5777 {
5778 	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
5779 }
5780 #endif
5781 
5782 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5783 {
5784 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5785 }
5786 EXPORT_SYMBOL_GPL(shmem_truncate_range);
5787 
5788 #define shmem_vm_ops				generic_file_vm_ops
5789 #define shmem_anon_vm_ops			generic_file_vm_ops
5790 #define shmem_file_operations			ramfs_file_operations
5791 #define shmem_acct_size(flags, size)		0
5792 #define shmem_unacct_size(flags, size)		do {} while (0)
5793 
5794 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
5795 				struct super_block *sb, struct inode *dir,
5796 				umode_t mode, dev_t dev, unsigned long flags)
5797 {
5798 	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
5799 	return inode ? inode : ERR_PTR(-ENOSPC);
5800 }
5801 
5802 #endif /* CONFIG_SHMEM */
5803 
5804 /* common code */
5805 
5806 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
5807 			loff_t size, unsigned long flags, unsigned int i_flags)
5808 {
5809 	struct inode *inode;
5810 	struct file *res;
5811 
5812 	if (IS_ERR(mnt))
5813 		return ERR_CAST(mnt);
5814 
5815 	if (size < 0 || size > MAX_LFS_FILESIZE)
5816 		return ERR_PTR(-EINVAL);
5817 
5818 	if (is_idmapped_mnt(mnt))
5819 		return ERR_PTR(-EINVAL);
5820 
5821 	if (shmem_acct_size(flags, size))
5822 		return ERR_PTR(-ENOMEM);
5823 
5824 	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
5825 				S_IFREG | S_IRWXUGO, 0, flags);
5826 	if (IS_ERR(inode)) {
5827 		shmem_unacct_size(flags, size);
5828 		return ERR_CAST(inode);
5829 	}
5830 	inode->i_flags |= i_flags;
5831 	inode->i_size = size;
5832 	clear_nlink(inode);	/* It is unlinked */
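	/* A no-op on MMU kernels; on nommu ramfs it preallocates the mapping's pages */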
5833 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
5834 	if (!IS_ERR(res))
5835 		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
5836 				&shmem_file_operations);
5837 	if (IS_ERR(res))
5838 		iput(inode);
5839 	return res;
5840 }
5841 
5842 /**
5843  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
5844  * 	kernel internal.  There will be NO LSM permission checks against the
5845  * 	underlying inode.  So users of this interface must do LSM checks at a
5846  *	higher layer.  The users are the big_key and shm implementations.  LSM
5847  *	checks are provided at the key or shm level rather than the inode.
5848  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5849  * @size: size to be set for the file
5850  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5851  */
5852 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
5853 {
5854 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
5855 }
5856 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);
5857 
5858 /**
5859  * shmem_file_setup - get an unlinked file living in tmpfs
5860  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5861  * @size: size to be set for the file
5862  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
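 *
 * A typical caller might do, e.g. (name and size are just placeholders):
 *	file = shmem_file_setup("name", size, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);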
5863  */
5864 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
5865 {
5866 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
5867 }
5868 EXPORT_SYMBOL_GPL(shmem_file_setup);
5869 
5870 /**
5871  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
5872  * @mnt: the tmpfs mount where the file will be created
5873  * @name: name for dentry (to be seen in /proc/<pid>/maps)
5874  * @size: size to be set for the file
5875  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
5876  */
5877 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
5878 				       loff_t size, unsigned long flags)
5879 {
5880 	return __shmem_file_setup(mnt, name, size, flags, 0);
5881 }
5882 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
5883 
5884 /**
5885  * shmem_zero_setup - setup a shared anonymous mapping
5886  * @vma: the vma to be mmapped is prepared by do_mmap
5887  */
5888 int shmem_zero_setup(struct vm_area_struct *vma)
5889 {
5890 	struct file *file;
5891 	loff_t size = vma->vm_end - vma->vm_start;
5892 
5893 	/*
5894 	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
5895 	 * between XFS directory reading and selinux: since this file is only
5896 	 * accessible to the user through its mapping, use S_PRIVATE flag to
5897 	 * bypass file security, in the same way as shmem_kernel_file_setup().
5898 	 */
5899 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
5900 	if (IS_ERR(file))
5901 		return PTR_ERR(file);
5902 
5903 	if (vma->vm_file)
5904 		fput(vma->vm_file);
5905 	vma->vm_file = file;
5906 	vma->vm_ops = &shmem_anon_vm_ops;
5907 
5908 	return 0;
5909 }
5910 
5911 /**
5912  * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
5913  * @mapping:	the folio's address_space
5914  * @index:	the folio index
5915  * @gfp:	the page allocator flags to use if allocating
5916  *
5917  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
5918  * with any new page allocations done using the specified allocation flags.
5919  * But read_cache_page_gfp() uses the ->read_folio() method: which does not
5920  * suit tmpfs, since it may have pages in swapcache, and needs to find those
5921  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
5922  *
5923  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
5924  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
5925  */
5926 struct folio *shmem_read_folio_gfp(struct address_space *mapping,
5927 		pgoff_t index, gfp_t gfp)
5928 {
5929 #ifdef CONFIG_SHMEM
5930 	struct inode *inode = mapping->host;
5931 	struct folio *folio;
5932 	int error;
5933 
5934 	error = shmem_get_folio_gfp(inode, index, i_size_read(inode),
5935 				    &folio, SGP_CACHE, gfp, NULL, NULL);
5936 	if (error)
5937 		return ERR_PTR(error);
5938 
5939 	folio_unlock(folio);
5940 	return folio;
5941 #else
5942 	/*
5943 	 * The tiny !SHMEM case uses ramfs without swap
5944 	 */
5945 	return mapping_read_folio_gfp(mapping, index, gfp);
5946 #endif
5947 }
5948 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
5949 
5950 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
5951 					 pgoff_t index, gfp_t gfp)
5952 {
5953 	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
5954 	struct page *page;
5955 
5956 	if (IS_ERR(folio))
5957 		return &folio->page;
5958 
5959 	page = folio_file_page(folio, index);
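	/* Never hand out a page that has been marked hardware-poisoned */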
5960 	if (PageHWPoison(page)) {
5961 		folio_put(folio);
5962 		return ERR_PTR(-EIO);
5963 	}
5964 
5965 	return page;
5966 }
5967 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
5968