1 /* 2 * Resizable virtual memory filesystem for Linux. 3 * 4 * Copyright (C) 2000 Linus Torvalds. 5 * 2000 Transmeta Corp. 6 * 2000-2001 Christoph Rohland 7 * 2000-2001 SAP AG 8 * 2002 Red Hat Inc. 9 * Copyright (C) 2002-2011 Hugh Dickins. 10 * Copyright (C) 2011 Google Inc. 11 * Copyright (C) 2002-2005 VERITAS Software Corporation. 12 * Copyright (C) 2004 Andi Kleen, SuSE Labs 13 * 14 * Extended attribute support for tmpfs: 15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net> 16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> 17 * 18 * tiny-shmem: 19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com> 20 * 21 * This file is released under the GPL. 22 */ 23 24 #include <linux/fs.h> 25 #include <linux/init.h> 26 #include <linux/vfs.h> 27 #include <linux/mount.h> 28 #include <linux/ramfs.h> 29 #include <linux/pagemap.h> 30 #include <linux/file.h> 31 #include <linux/fileattr.h> 32 #include <linux/mm.h> 33 #include <linux/random.h> 34 #include <linux/sched/signal.h> 35 #include <linux/export.h> 36 #include <linux/shmem_fs.h> 37 #include <linux/swap.h> 38 #include <linux/uio.h> 39 #include <linux/hugetlb.h> 40 #include <linux/fs_parser.h> 41 #include <linux/swapfile.h> 42 #include <linux/iversion.h> 43 #include "swap.h" 44 45 static struct vfsmount *shm_mnt __ro_after_init; 46 47 #ifdef CONFIG_SHMEM 48 /* 49 * This virtual memory filesystem is heavily based on the ramfs. It 50 * extends ramfs by the ability to use swap and honor resource limits 51 * which makes it a completely usable filesystem. 52 */ 53 54 #include <linux/xattr.h> 55 #include <linux/exportfs.h> 56 #include <linux/posix_acl.h> 57 #include <linux/posix_acl_xattr.h> 58 #include <linux/mman.h> 59 #include <linux/string.h> 60 #include <linux/slab.h> 61 #include <linux/backing-dev.h> 62 #include <linux/writeback.h> 63 #include <linux/pagevec.h> 64 #include <linux/percpu_counter.h> 65 #include <linux/falloc.h> 66 #include <linux/splice.h> 67 #include <linux/security.h> 68 #include <linux/swapops.h> 69 #include <linux/mempolicy.h> 70 #include <linux/namei.h> 71 #include <linux/ctype.h> 72 #include <linux/migrate.h> 73 #include <linux/highmem.h> 74 #include <linux/seq_file.h> 75 #include <linux/magic.h> 76 #include <linux/syscalls.h> 77 #include <linux/fcntl.h> 78 #include <uapi/linux/memfd.h> 79 #include <linux/rmap.h> 80 #include <linux/uuid.h> 81 #include <linux/quotaops.h> 82 #include <linux/rcupdate_wait.h> 83 84 #include <linux/uaccess.h> 85 86 #include "internal.h" 87 88 #define BLOCKS_PER_PAGE (PAGE_SIZE/512) 89 #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) 90 91 /* Pretend that each entry is of this size in directory's i_size */ 92 #define BOGO_DIRENT_SIZE 20 93 94 /* Pretend that one inode + its dentry occupy this much memory */ 95 #define BOGO_INODE_SIZE 1024 96 97 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */ 98 #define SHORT_SYMLINK_LEN 128 99 100 /* 101 * shmem_fallocate communicates with shmem_fault or shmem_writepage via 102 * inode->i_private (with i_rwsem making sure that it has only one user at 103 * a time): we would prefer not to enlarge the shmem inode just for that. 
104 */ 105 struct shmem_falloc { 106 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */ 107 pgoff_t start; /* start of range currently being fallocated */ 108 pgoff_t next; /* the next page offset to be fallocated */ 109 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 110 pgoff_t nr_unswapped; /* how often writepage refused to swap out */ 111 }; 112 113 struct shmem_options { 114 unsigned long long blocks; 115 unsigned long long inodes; 116 struct mempolicy *mpol; 117 kuid_t uid; 118 kgid_t gid; 119 umode_t mode; 120 bool full_inums; 121 int huge; 122 int seen; 123 bool noswap; 124 unsigned short quota_types; 125 struct shmem_quota_limits qlimits; 126 #define SHMEM_SEEN_BLOCKS 1 127 #define SHMEM_SEEN_INODES 2 128 #define SHMEM_SEEN_HUGE 4 129 #define SHMEM_SEEN_INUMS 8 130 #define SHMEM_SEEN_NOSWAP 16 131 #define SHMEM_SEEN_QUOTA 32 132 }; 133 134 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 135 static unsigned long huge_shmem_orders_always __read_mostly; 136 static unsigned long huge_shmem_orders_madvise __read_mostly; 137 static unsigned long huge_shmem_orders_inherit __read_mostly; 138 static unsigned long huge_shmem_orders_within_size __read_mostly; 139 #endif 140 141 #ifdef CONFIG_TMPFS 142 static unsigned long shmem_default_max_blocks(void) 143 { 144 return totalram_pages() / 2; 145 } 146 147 static unsigned long shmem_default_max_inodes(void) 148 { 149 unsigned long nr_pages = totalram_pages(); 150 151 return min3(nr_pages - totalhigh_pages(), nr_pages / 2, 152 ULONG_MAX / BOGO_INODE_SIZE); 153 } 154 #endif 155 156 static int shmem_swapin_folio(struct inode *inode, pgoff_t index, 157 struct folio **foliop, enum sgp_type sgp, gfp_t gfp, 158 struct mm_struct *fault_mm, vm_fault_t *fault_type); 159 160 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) 161 { 162 return sb->s_fs_info; 163 } 164 165 /* 166 * shmem_file_setup pre-accounts the whole fixed size of a VM object, 167 * for shared memory and for shared anonymous (/dev/zero) mappings 168 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), 169 * consistent with the pre-accounting of private mappings ... 170 */ 171 static inline int shmem_acct_size(unsigned long flags, loff_t size) 172 { 173 return (flags & VM_NORESERVE) ? 174 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); 175 } 176 177 static inline void shmem_unacct_size(unsigned long flags, loff_t size) 178 { 179 if (!(flags & VM_NORESERVE)) 180 vm_unacct_memory(VM_ACCT(size)); 181 } 182 183 static inline int shmem_reacct_size(unsigned long flags, 184 loff_t oldsize, loff_t newsize) 185 { 186 if (!(flags & VM_NORESERVE)) { 187 if (VM_ACCT(newsize) > VM_ACCT(oldsize)) 188 return security_vm_enough_memory_mm(current->mm, 189 VM_ACCT(newsize) - VM_ACCT(oldsize)); 190 else if (VM_ACCT(newsize) < VM_ACCT(oldsize)) 191 vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize)); 192 } 193 return 0; 194 } 195 196 /* 197 * ... whereas tmpfs objects are accounted incrementally as 198 * pages are allocated, in order to allow large sparse files. 199 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM, 200 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. 
201 */ 202 static inline int shmem_acct_blocks(unsigned long flags, long pages) 203 { 204 if (!(flags & VM_NORESERVE)) 205 return 0; 206 207 return security_vm_enough_memory_mm(current->mm, 208 pages * VM_ACCT(PAGE_SIZE)); 209 } 210 211 static inline void shmem_unacct_blocks(unsigned long flags, long pages) 212 { 213 if (flags & VM_NORESERVE) 214 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); 215 } 216 217 static int shmem_inode_acct_blocks(struct inode *inode, long pages) 218 { 219 struct shmem_inode_info *info = SHMEM_I(inode); 220 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 221 int err = -ENOSPC; 222 223 if (shmem_acct_blocks(info->flags, pages)) 224 return err; 225 226 might_sleep(); /* when quotas */ 227 if (sbinfo->max_blocks) { 228 if (!percpu_counter_limited_add(&sbinfo->used_blocks, 229 sbinfo->max_blocks, pages)) 230 goto unacct; 231 232 err = dquot_alloc_block_nodirty(inode, pages); 233 if (err) { 234 percpu_counter_sub(&sbinfo->used_blocks, pages); 235 goto unacct; 236 } 237 } else { 238 err = dquot_alloc_block_nodirty(inode, pages); 239 if (err) 240 goto unacct; 241 } 242 243 return 0; 244 245 unacct: 246 shmem_unacct_blocks(info->flags, pages); 247 return err; 248 } 249 250 static void shmem_inode_unacct_blocks(struct inode *inode, long pages) 251 { 252 struct shmem_inode_info *info = SHMEM_I(inode); 253 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 254 255 might_sleep(); /* when quotas */ 256 dquot_free_block_nodirty(inode, pages); 257 258 if (sbinfo->max_blocks) 259 percpu_counter_sub(&sbinfo->used_blocks, pages); 260 shmem_unacct_blocks(info->flags, pages); 261 } 262 263 static const struct super_operations shmem_ops; 264 static const struct address_space_operations shmem_aops; 265 static const struct file_operations shmem_file_operations; 266 static const struct inode_operations shmem_inode_operations; 267 static const struct inode_operations shmem_dir_inode_operations; 268 static const struct inode_operations shmem_special_inode_operations; 269 static const struct vm_operations_struct shmem_vm_ops; 270 static const struct vm_operations_struct shmem_anon_vm_ops; 271 static struct file_system_type shmem_fs_type; 272 273 bool shmem_mapping(struct address_space *mapping) 274 { 275 return mapping->a_ops == &shmem_aops; 276 } 277 EXPORT_SYMBOL_GPL(shmem_mapping); 278 279 bool vma_is_anon_shmem(struct vm_area_struct *vma) 280 { 281 return vma->vm_ops == &shmem_anon_vm_ops; 282 } 283 284 bool vma_is_shmem(struct vm_area_struct *vma) 285 { 286 return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops; 287 } 288 289 static LIST_HEAD(shmem_swaplist); 290 static DEFINE_MUTEX(shmem_swaplist_mutex); 291 292 #ifdef CONFIG_TMPFS_QUOTA 293 294 static int shmem_enable_quotas(struct super_block *sb, 295 unsigned short quota_types) 296 { 297 int type, err = 0; 298 299 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; 300 for (type = 0; type < SHMEM_MAXQUOTAS; type++) { 301 if (!(quota_types & (1 << type))) 302 continue; 303 err = dquot_load_quota_sb(sb, type, QFMT_SHMEM, 304 DQUOT_USAGE_ENABLED | 305 DQUOT_LIMITS_ENABLED); 306 if (err) 307 goto out_err; 308 } 309 return 0; 310 311 out_err: 312 pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n", 313 type, err); 314 for (type--; type >= 0; type--) 315 dquot_quota_off(sb, type); 316 return err; 317 } 318 319 static void shmem_disable_quotas(struct super_block *sb) 320 { 321 int type; 322 323 for (type = 0; type < SHMEM_MAXQUOTAS; type++) 324 dquot_quota_off(sb, type); 325 } 326 327 static 
struct dquot __rcu **shmem_get_dquots(struct inode *inode) 328 { 329 return SHMEM_I(inode)->i_dquot; 330 } 331 #endif /* CONFIG_TMPFS_QUOTA */ 332 333 /* 334 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and 335 * produces a novel ino for the newly allocated inode. 336 * 337 * It may also be called when making a hard link to permit the space needed by 338 * each dentry. However, in that case, no new inode number is needed since that 339 * internally draws from another pool of inode numbers (currently global 340 * get_next_ino()). This case is indicated by passing NULL as inop. 341 */ 342 #define SHMEM_INO_BATCH 1024 343 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) 344 { 345 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 346 ino_t ino; 347 348 if (!(sb->s_flags & SB_KERNMOUNT)) { 349 raw_spin_lock(&sbinfo->stat_lock); 350 if (sbinfo->max_inodes) { 351 if (sbinfo->free_ispace < BOGO_INODE_SIZE) { 352 raw_spin_unlock(&sbinfo->stat_lock); 353 return -ENOSPC; 354 } 355 sbinfo->free_ispace -= BOGO_INODE_SIZE; 356 } 357 if (inop) { 358 ino = sbinfo->next_ino++; 359 if (unlikely(is_zero_ino(ino))) 360 ino = sbinfo->next_ino++; 361 if (unlikely(!sbinfo->full_inums && 362 ino > UINT_MAX)) { 363 /* 364 * Emulate get_next_ino uint wraparound for 365 * compatibility 366 */ 367 if (IS_ENABLED(CONFIG_64BIT)) 368 pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n", 369 __func__, MINOR(sb->s_dev)); 370 sbinfo->next_ino = 1; 371 ino = sbinfo->next_ino++; 372 } 373 *inop = ino; 374 } 375 raw_spin_unlock(&sbinfo->stat_lock); 376 } else if (inop) { 377 /* 378 * __shmem_file_setup, one of our callers, is lock-free: it 379 * doesn't hold stat_lock in shmem_reserve_inode since 380 * max_inodes is always 0, and is called from potentially 381 * unknown contexts. As such, use a per-cpu batched allocator 382 * which doesn't require the per-sb stat_lock unless we are at 383 * the batch boundary. 384 * 385 * We don't need to worry about inode{32,64} since SB_KERNMOUNT 386 * shmem mounts are not exposed to userspace, so we don't need 387 * to worry about things like glibc compatibility. 388 */ 389 ino_t *next_ino; 390 391 next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); 392 ino = *next_ino; 393 if (unlikely(ino % SHMEM_INO_BATCH == 0)) { 394 raw_spin_lock(&sbinfo->stat_lock); 395 ino = sbinfo->next_ino; 396 sbinfo->next_ino += SHMEM_INO_BATCH; 397 raw_spin_unlock(&sbinfo->stat_lock); 398 if (unlikely(is_zero_ino(ino))) 399 ino++; 400 } 401 *inop = ino; 402 *next_ino = ++ino; 403 put_cpu(); 404 } 405 406 return 0; 407 } 408 409 static void shmem_free_inode(struct super_block *sb, size_t freed_ispace) 410 { 411 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 412 if (sbinfo->max_inodes) { 413 raw_spin_lock(&sbinfo->stat_lock); 414 sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace; 415 raw_spin_unlock(&sbinfo->stat_lock); 416 } 417 } 418 419 /** 420 * shmem_recalc_inode - recalculate the block usage of an inode 421 * @inode: inode to recalc 422 * @alloced: the change in number of pages allocated to inode 423 * @swapped: the change in number of pages swapped from inode 424 * 425 * We have to calculate the free blocks since the mm can drop 426 * undirtied hole pages behind our back. 
427 * 428 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped 429 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) 430 */ 431 static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped) 432 { 433 struct shmem_inode_info *info = SHMEM_I(inode); 434 long freed; 435 436 spin_lock(&info->lock); 437 info->alloced += alloced; 438 info->swapped += swapped; 439 freed = info->alloced - info->swapped - 440 READ_ONCE(inode->i_mapping->nrpages); 441 /* 442 * Special case: whereas normally shmem_recalc_inode() is called 443 * after i_mapping->nrpages has already been adjusted (up or down), 444 * shmem_writepage() has to raise swapped before nrpages is lowered - 445 * to stop a racing shmem_recalc_inode() from thinking that a page has 446 * been freed. Compensate here, to avoid the need for a followup call. 447 */ 448 if (swapped > 0) 449 freed += swapped; 450 if (freed > 0) 451 info->alloced -= freed; 452 spin_unlock(&info->lock); 453 454 /* The quota case may block */ 455 if (freed > 0) 456 shmem_inode_unacct_blocks(inode, freed); 457 } 458 459 bool shmem_charge(struct inode *inode, long pages) 460 { 461 struct address_space *mapping = inode->i_mapping; 462 463 if (shmem_inode_acct_blocks(inode, pages)) 464 return false; 465 466 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */ 467 xa_lock_irq(&mapping->i_pages); 468 mapping->nrpages += pages; 469 xa_unlock_irq(&mapping->i_pages); 470 471 shmem_recalc_inode(inode, pages, 0); 472 return true; 473 } 474 475 void shmem_uncharge(struct inode *inode, long pages) 476 { 477 /* pages argument is currently unused: keep it to help debugging */ 478 /* nrpages adjustment done by __filemap_remove_folio() or caller */ 479 480 shmem_recalc_inode(inode, 0, 0); 481 } 482 483 /* 484 * Replace item expected in xarray by a new item, while holding xa_lock. 485 */ 486 static int shmem_replace_entry(struct address_space *mapping, 487 pgoff_t index, void *expected, void *replacement) 488 { 489 XA_STATE(xas, &mapping->i_pages, index); 490 void *item; 491 492 VM_BUG_ON(!expected); 493 VM_BUG_ON(!replacement); 494 item = xas_load(&xas); 495 if (item != expected) 496 return -ENOENT; 497 xas_store(&xas, replacement); 498 return 0; 499 } 500 501 /* 502 * Sometimes, before we decide whether to proceed or to fail, we must check 503 * that an entry was not already brought back from swap by a racing thread. 504 * 505 * Checking page is not enough: by the time a SwapCache page is locked, it 506 * might be reused, and again be SwapCache, using the same swap as before. 507 */ 508 static bool shmem_confirm_swap(struct address_space *mapping, 509 pgoff_t index, swp_entry_t swap) 510 { 511 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); 512 } 513 514 /* 515 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option 516 * 517 * SHMEM_HUGE_NEVER: 518 * disables huge pages for the mount; 519 * SHMEM_HUGE_ALWAYS: 520 * enables huge pages for the mount; 521 * SHMEM_HUGE_WITHIN_SIZE: 522 * only allocate huge pages if the page will be fully within i_size, 523 * also respect fadvise()/madvise() hints; 524 * SHMEM_HUGE_ADVISE: 525 * only allocate huge pages if requested with fadvise()/madvise(); 526 */ 527 528 #define SHMEM_HUGE_NEVER 0 529 #define SHMEM_HUGE_ALWAYS 1 530 #define SHMEM_HUGE_WITHIN_SIZE 2 531 #define SHMEM_HUGE_ADVISE 3 532 533 /* 534 * Special values. 
535 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled: 536 * 537 * SHMEM_HUGE_DENY: 538 * disables huge on shm_mnt and all mounts, for emergency use; 539 * SHMEM_HUGE_FORCE: 540 * enables huge on shm_mnt and all mounts, w/o needing option, for testing; 541 * 542 */ 543 #define SHMEM_HUGE_DENY (-1) 544 #define SHMEM_HUGE_FORCE (-2) 545 546 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 547 /* ifdef here to avoid bloating shmem.o when not necessary */ 548 549 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; 550 551 bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, 552 struct mm_struct *mm, unsigned long vm_flags) 553 { 554 loff_t i_size; 555 556 if (!S_ISREG(inode->i_mode)) 557 return false; 558 if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags))) 559 return false; 560 if (shmem_huge == SHMEM_HUGE_DENY) 561 return false; 562 if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE) 563 return true; 564 565 switch (SHMEM_SB(inode->i_sb)->huge) { 566 case SHMEM_HUGE_ALWAYS: 567 return true; 568 case SHMEM_HUGE_WITHIN_SIZE: 569 index = round_up(index + 1, HPAGE_PMD_NR); 570 i_size = round_up(i_size_read(inode), PAGE_SIZE); 571 if (i_size >> PAGE_SHIFT >= index) 572 return true; 573 fallthrough; 574 case SHMEM_HUGE_ADVISE: 575 if (mm && (vm_flags & VM_HUGEPAGE)) 576 return true; 577 fallthrough; 578 default: 579 return false; 580 } 581 } 582 583 #if defined(CONFIG_SYSFS) 584 static int shmem_parse_huge(const char *str) 585 { 586 if (!strcmp(str, "never")) 587 return SHMEM_HUGE_NEVER; 588 if (!strcmp(str, "always")) 589 return SHMEM_HUGE_ALWAYS; 590 if (!strcmp(str, "within_size")) 591 return SHMEM_HUGE_WITHIN_SIZE; 592 if (!strcmp(str, "advise")) 593 return SHMEM_HUGE_ADVISE; 594 if (!strcmp(str, "deny")) 595 return SHMEM_HUGE_DENY; 596 if (!strcmp(str, "force")) 597 return SHMEM_HUGE_FORCE; 598 return -EINVAL; 599 } 600 #endif 601 602 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) 603 static const char *shmem_format_huge(int huge) 604 { 605 switch (huge) { 606 case SHMEM_HUGE_NEVER: 607 return "never"; 608 case SHMEM_HUGE_ALWAYS: 609 return "always"; 610 case SHMEM_HUGE_WITHIN_SIZE: 611 return "within_size"; 612 case SHMEM_HUGE_ADVISE: 613 return "advise"; 614 case SHMEM_HUGE_DENY: 615 return "deny"; 616 case SHMEM_HUGE_FORCE: 617 return "force"; 618 default: 619 VM_BUG_ON(1); 620 return "bad_val"; 621 } 622 } 623 #endif 624 625 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 626 struct shrink_control *sc, unsigned long nr_to_split) 627 { 628 LIST_HEAD(list), *pos, *next; 629 LIST_HEAD(to_remove); 630 struct inode *inode; 631 struct shmem_inode_info *info; 632 struct folio *folio; 633 unsigned long batch = sc ? 
sc->nr_to_scan : 128; 634 int split = 0; 635 636 if (list_empty(&sbinfo->shrinklist)) 637 return SHRINK_STOP; 638 639 spin_lock(&sbinfo->shrinklist_lock); 640 list_for_each_safe(pos, next, &sbinfo->shrinklist) { 641 info = list_entry(pos, struct shmem_inode_info, shrinklist); 642 643 /* pin the inode */ 644 inode = igrab(&info->vfs_inode); 645 646 /* inode is about to be evicted */ 647 if (!inode) { 648 list_del_init(&info->shrinklist); 649 goto next; 650 } 651 652 /* Check if there's anything to gain */ 653 if (round_up(inode->i_size, PAGE_SIZE) == 654 round_up(inode->i_size, HPAGE_PMD_SIZE)) { 655 list_move(&info->shrinklist, &to_remove); 656 goto next; 657 } 658 659 list_move(&info->shrinklist, &list); 660 next: 661 sbinfo->shrinklist_len--; 662 if (!--batch) 663 break; 664 } 665 spin_unlock(&sbinfo->shrinklist_lock); 666 667 list_for_each_safe(pos, next, &to_remove) { 668 info = list_entry(pos, struct shmem_inode_info, shrinklist); 669 inode = &info->vfs_inode; 670 list_del_init(&info->shrinklist); 671 iput(inode); 672 } 673 674 list_for_each_safe(pos, next, &list) { 675 int ret; 676 pgoff_t index; 677 678 info = list_entry(pos, struct shmem_inode_info, shrinklist); 679 inode = &info->vfs_inode; 680 681 if (nr_to_split && split >= nr_to_split) 682 goto move_back; 683 684 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT; 685 folio = filemap_get_folio(inode->i_mapping, index); 686 if (IS_ERR(folio)) 687 goto drop; 688 689 /* No huge page at the end of the file: nothing to split */ 690 if (!folio_test_large(folio)) { 691 folio_put(folio); 692 goto drop; 693 } 694 695 /* 696 * Move the inode on the list back to shrinklist if we failed 697 * to lock the page at this time. 698 * 699 * Waiting for the lock may lead to deadlock in the 700 * reclaim path. 701 */ 702 if (!folio_trylock(folio)) { 703 folio_put(folio); 704 goto move_back; 705 } 706 707 ret = split_folio(folio); 708 folio_unlock(folio); 709 folio_put(folio); 710 711 /* If split failed move the inode on the list back to shrinklist */ 712 if (ret) 713 goto move_back; 714 715 split++; 716 drop: 717 list_del_init(&info->shrinklist); 718 goto put; 719 move_back: 720 /* 721 * Make sure the inode is either on the global list or deleted 722 * from any local list before iput() since it could be deleted 723 * in another thread once we put the inode (then the local list 724 * is corrupted). 725 */ 726 spin_lock(&sbinfo->shrinklist_lock); 727 list_move(&info->shrinklist, &sbinfo->shrinklist); 728 sbinfo->shrinklist_len++; 729 spin_unlock(&sbinfo->shrinklist_lock); 730 put: 731 iput(inode); 732 } 733 734 return split; 735 } 736 737 static long shmem_unused_huge_scan(struct super_block *sb, 738 struct shrink_control *sc) 739 { 740 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 741 742 if (!READ_ONCE(sbinfo->shrinklist_len)) 743 return SHRINK_STOP; 744 745 return shmem_unused_huge_shrink(sbinfo, sc, 0); 746 } 747 748 static long shmem_unused_huge_count(struct super_block *sb, 749 struct shrink_control *sc) 750 { 751 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 752 return READ_ONCE(sbinfo->shrinklist_len); 753 } 754 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 755 756 #define shmem_huge SHMEM_HUGE_DENY 757 758 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 759 struct shrink_control *sc, unsigned long nr_to_split) 760 { 761 return 0; 762 } 763 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 764 765 /* 766 * Somewhat like filemap_add_folio, but error if expected item has gone. 
767 */ 768 static int shmem_add_to_page_cache(struct folio *folio, 769 struct address_space *mapping, 770 pgoff_t index, void *expected, gfp_t gfp) 771 { 772 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); 773 long nr = folio_nr_pages(folio); 774 775 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio); 776 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 777 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); 778 VM_BUG_ON(expected && folio_test_large(folio)); 779 780 folio_ref_add(folio, nr); 781 folio->mapping = mapping; 782 folio->index = index; 783 784 gfp &= GFP_RECLAIM_MASK; 785 folio_throttle_swaprate(folio, gfp); 786 787 do { 788 xas_lock_irq(&xas); 789 if (expected != xas_find_conflict(&xas)) { 790 xas_set_err(&xas, -EEXIST); 791 goto unlock; 792 } 793 if (expected && xas_find_conflict(&xas)) { 794 xas_set_err(&xas, -EEXIST); 795 goto unlock; 796 } 797 xas_store(&xas, folio); 798 if (xas_error(&xas)) 799 goto unlock; 800 if (folio_test_pmd_mappable(folio)) 801 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr); 802 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); 803 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr); 804 mapping->nrpages += nr; 805 unlock: 806 xas_unlock_irq(&xas); 807 } while (xas_nomem(&xas, gfp)); 808 809 if (xas_error(&xas)) { 810 folio->mapping = NULL; 811 folio_ref_sub(folio, nr); 812 return xas_error(&xas); 813 } 814 815 return 0; 816 } 817 818 /* 819 * Somewhat like filemap_remove_folio, but substitutes swap for @folio. 820 */ 821 static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) 822 { 823 struct address_space *mapping = folio->mapping; 824 long nr = folio_nr_pages(folio); 825 int error; 826 827 xa_lock_irq(&mapping->i_pages); 828 error = shmem_replace_entry(mapping, folio->index, folio, radswap); 829 folio->mapping = NULL; 830 mapping->nrpages -= nr; 831 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); 832 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); 833 xa_unlock_irq(&mapping->i_pages); 834 folio_put(folio); 835 BUG_ON(error); 836 } 837 838 /* 839 * Remove swap entry from page cache, free the swap and its page cache. 840 */ 841 static int shmem_free_swap(struct address_space *mapping, 842 pgoff_t index, void *radswap) 843 { 844 void *old; 845 846 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); 847 if (old != radswap) 848 return -ENOENT; 849 free_swap_and_cache(radix_to_swp_entry(radswap)); 850 return 0; 851 } 852 853 /* 854 * Determine (in bytes) how many of the shmem object's pages mapped by the 855 * given offsets are swapped out. 856 * 857 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU, 858 * as long as the inode doesn't go away and racy results are not a problem. 859 */ 860 unsigned long shmem_partial_swap_usage(struct address_space *mapping, 861 pgoff_t start, pgoff_t end) 862 { 863 XA_STATE(xas, &mapping->i_pages, start); 864 struct page *page; 865 unsigned long swapped = 0; 866 unsigned long max = end - 1; 867 868 rcu_read_lock(); 869 xas_for_each(&xas, page, max) { 870 if (xas_retry(&xas, page)) 871 continue; 872 if (xa_is_value(page)) 873 swapped++; 874 if (xas.xa_index == max) 875 break; 876 if (need_resched()) { 877 xas_pause(&xas); 878 cond_resched_rcu(); 879 } 880 } 881 rcu_read_unlock(); 882 883 return swapped << PAGE_SHIFT; 884 } 885 886 /* 887 * Determine (in bytes) how many of the shmem object's pages mapped by the 888 * given vma is swapped out. 
889 * 890 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU, 891 * as long as the inode doesn't go away and racy results are not a problem. 892 */ 893 unsigned long shmem_swap_usage(struct vm_area_struct *vma) 894 { 895 struct inode *inode = file_inode(vma->vm_file); 896 struct shmem_inode_info *info = SHMEM_I(inode); 897 struct address_space *mapping = inode->i_mapping; 898 unsigned long swapped; 899 900 /* Be careful as we don't hold info->lock */ 901 swapped = READ_ONCE(info->swapped); 902 903 /* 904 * The easier cases are when the shmem object has nothing in swap, or 905 * the vma maps it whole. Then we can simply use the stats that we 906 * already track. 907 */ 908 if (!swapped) 909 return 0; 910 911 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) 912 return swapped << PAGE_SHIFT; 913 914 /* Here comes the more involved part */ 915 return shmem_partial_swap_usage(mapping, vma->vm_pgoff, 916 vma->vm_pgoff + vma_pages(vma)); 917 } 918 919 /* 920 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists. 921 */ 922 void shmem_unlock_mapping(struct address_space *mapping) 923 { 924 struct folio_batch fbatch; 925 pgoff_t index = 0; 926 927 folio_batch_init(&fbatch); 928 /* 929 * Minor point, but we might as well stop if someone else SHM_LOCKs it. 930 */ 931 while (!mapping_unevictable(mapping) && 932 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { 933 check_move_unevictable_folios(&fbatch); 934 folio_batch_release(&fbatch); 935 cond_resched(); 936 } 937 } 938 939 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index) 940 { 941 struct folio *folio; 942 943 /* 944 * At first avoid shmem_get_folio(,,,SGP_READ): that fails 945 * beyond i_size, and reports fallocated folios as holes. 946 */ 947 folio = filemap_get_entry(inode->i_mapping, index); 948 if (!folio) 949 return folio; 950 if (!xa_is_value(folio)) { 951 folio_lock(folio); 952 if (folio->mapping == inode->i_mapping) 953 return folio; 954 /* The folio has been swapped out */ 955 folio_unlock(folio); 956 folio_put(folio); 957 } 958 /* 959 * But read a folio back from swap if any of it is within i_size 960 * (although in some cases this is just a waste of time). 961 */ 962 folio = NULL; 963 shmem_get_folio(inode, index, &folio, SGP_READ); 964 return folio; 965 } 966 967 /* 968 * Remove range of pages and swap entries from page cache, and free them. 969 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate. 
970 */ 971 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, 972 bool unfalloc) 973 { 974 struct address_space *mapping = inode->i_mapping; 975 struct shmem_inode_info *info = SHMEM_I(inode); 976 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; 977 pgoff_t end = (lend + 1) >> PAGE_SHIFT; 978 struct folio_batch fbatch; 979 pgoff_t indices[PAGEVEC_SIZE]; 980 struct folio *folio; 981 bool same_folio; 982 long nr_swaps_freed = 0; 983 pgoff_t index; 984 int i; 985 986 if (lend == -1) 987 end = -1; /* unsigned, so actually very big */ 988 989 if (info->fallocend > start && info->fallocend <= end && !unfalloc) 990 info->fallocend = start; 991 992 folio_batch_init(&fbatch); 993 index = start; 994 while (index < end && find_lock_entries(mapping, &index, end - 1, 995 &fbatch, indices)) { 996 for (i = 0; i < folio_batch_count(&fbatch); i++) { 997 folio = fbatch.folios[i]; 998 999 if (xa_is_value(folio)) { 1000 if (unfalloc) 1001 continue; 1002 nr_swaps_freed += !shmem_free_swap(mapping, 1003 indices[i], folio); 1004 continue; 1005 } 1006 1007 if (!unfalloc || !folio_test_uptodate(folio)) 1008 truncate_inode_folio(mapping, folio); 1009 folio_unlock(folio); 1010 } 1011 folio_batch_remove_exceptionals(&fbatch); 1012 folio_batch_release(&fbatch); 1013 cond_resched(); 1014 } 1015 1016 /* 1017 * When undoing a failed fallocate, we want none of the partial folio 1018 * zeroing and splitting below, but shall want to truncate the whole 1019 * folio when !uptodate indicates that it was added by this fallocate, 1020 * even when [lstart, lend] covers only a part of the folio. 1021 */ 1022 if (unfalloc) 1023 goto whole_folios; 1024 1025 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT); 1026 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT); 1027 if (folio) { 1028 same_folio = lend < folio_pos(folio) + folio_size(folio); 1029 folio_mark_dirty(folio); 1030 if (!truncate_inode_partial_folio(folio, lstart, lend)) { 1031 start = folio_next_index(folio); 1032 if (same_folio) 1033 end = folio->index; 1034 } 1035 folio_unlock(folio); 1036 folio_put(folio); 1037 folio = NULL; 1038 } 1039 1040 if (!same_folio) 1041 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT); 1042 if (folio) { 1043 folio_mark_dirty(folio); 1044 if (!truncate_inode_partial_folio(folio, lstart, lend)) 1045 end = folio->index; 1046 folio_unlock(folio); 1047 folio_put(folio); 1048 } 1049 1050 whole_folios: 1051 1052 index = start; 1053 while (index < end) { 1054 cond_resched(); 1055 1056 if (!find_get_entries(mapping, &index, end - 1, &fbatch, 1057 indices)) { 1058 /* If all gone or hole-punch or unfalloc, we're done */ 1059 if (index == start || end != -1) 1060 break; 1061 /* But if truncating, restart to make sure all gone */ 1062 index = start; 1063 continue; 1064 } 1065 for (i = 0; i < folio_batch_count(&fbatch); i++) { 1066 folio = fbatch.folios[i]; 1067 1068 if (xa_is_value(folio)) { 1069 if (unfalloc) 1070 continue; 1071 if (shmem_free_swap(mapping, indices[i], folio)) { 1072 /* Swap was replaced by page: retry */ 1073 index = indices[i]; 1074 break; 1075 } 1076 nr_swaps_freed++; 1077 continue; 1078 } 1079 1080 folio_lock(folio); 1081 1082 if (!unfalloc || !folio_test_uptodate(folio)) { 1083 if (folio_mapping(folio) != mapping) { 1084 /* Page was replaced by swap: retry */ 1085 folio_unlock(folio); 1086 index = indices[i]; 1087 break; 1088 } 1089 VM_BUG_ON_FOLIO(folio_test_writeback(folio), 1090 folio); 1091 1092 if (!folio_test_large(folio)) { 1093 truncate_inode_folio(mapping, 
folio); 1094 } else if (truncate_inode_partial_folio(folio, lstart, lend)) { 1095 /* 1096 * If we split a page, reset the loop so 1097 * that we pick up the new sub pages. 1098 * Otherwise the THP was entirely 1099 * dropped or the target range was 1100 * zeroed, so just continue the loop as 1101 * is. 1102 */ 1103 if (!folio_test_large(folio)) { 1104 folio_unlock(folio); 1105 index = start; 1106 break; 1107 } 1108 } 1109 } 1110 folio_unlock(folio); 1111 } 1112 folio_batch_remove_exceptionals(&fbatch); 1113 folio_batch_release(&fbatch); 1114 } 1115 1116 shmem_recalc_inode(inode, 0, -nr_swaps_freed); 1117 } 1118 1119 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 1120 { 1121 shmem_undo_range(inode, lstart, lend, false); 1122 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 1123 inode_inc_iversion(inode); 1124 } 1125 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1126 1127 static int shmem_getattr(struct mnt_idmap *idmap, 1128 const struct path *path, struct kstat *stat, 1129 u32 request_mask, unsigned int query_flags) 1130 { 1131 struct inode *inode = path->dentry->d_inode; 1132 struct shmem_inode_info *info = SHMEM_I(inode); 1133 1134 if (info->alloced - info->swapped != inode->i_mapping->nrpages) 1135 shmem_recalc_inode(inode, 0, 0); 1136 1137 if (info->fsflags & FS_APPEND_FL) 1138 stat->attributes |= STATX_ATTR_APPEND; 1139 if (info->fsflags & FS_IMMUTABLE_FL) 1140 stat->attributes |= STATX_ATTR_IMMUTABLE; 1141 if (info->fsflags & FS_NODUMP_FL) 1142 stat->attributes |= STATX_ATTR_NODUMP; 1143 stat->attributes_mask |= (STATX_ATTR_APPEND | 1144 STATX_ATTR_IMMUTABLE | 1145 STATX_ATTR_NODUMP); 1146 generic_fillattr(idmap, request_mask, inode, stat); 1147 1148 if (shmem_is_huge(inode, 0, false, NULL, 0)) 1149 stat->blksize = HPAGE_PMD_SIZE; 1150 1151 if (request_mask & STATX_BTIME) { 1152 stat->result_mask |= STATX_BTIME; 1153 stat->btime.tv_sec = info->i_crtime.tv_sec; 1154 stat->btime.tv_nsec = info->i_crtime.tv_nsec; 1155 } 1156 1157 return 0; 1158 } 1159 1160 static int shmem_setattr(struct mnt_idmap *idmap, 1161 struct dentry *dentry, struct iattr *attr) 1162 { 1163 struct inode *inode = d_inode(dentry); 1164 struct shmem_inode_info *info = SHMEM_I(inode); 1165 int error; 1166 bool update_mtime = false; 1167 bool update_ctime = true; 1168 1169 error = setattr_prepare(idmap, dentry, attr); 1170 if (error) 1171 return error; 1172 1173 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { 1174 if ((inode->i_mode ^ attr->ia_mode) & 0111) { 1175 return -EPERM; 1176 } 1177 } 1178 1179 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 1180 loff_t oldsize = inode->i_size; 1181 loff_t newsize = attr->ia_size; 1182 1183 /* protected by i_rwsem */ 1184 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 1185 (newsize > oldsize && (info->seals & F_SEAL_GROW))) 1186 return -EPERM; 1187 1188 if (newsize != oldsize) { 1189 error = shmem_reacct_size(SHMEM_I(inode)->flags, 1190 oldsize, newsize); 1191 if (error) 1192 return error; 1193 i_size_write(inode, newsize); 1194 update_mtime = true; 1195 } else { 1196 update_ctime = false; 1197 } 1198 if (newsize <= oldsize) { 1199 loff_t holebegin = round_up(newsize, PAGE_SIZE); 1200 if (oldsize > holebegin) 1201 unmap_mapping_range(inode->i_mapping, 1202 holebegin, 0, 1); 1203 if (info->alloced) 1204 shmem_truncate_range(inode, 1205 newsize, (loff_t)-1); 1206 /* unmap again to remove racily COWed private pages */ 1207 if (oldsize > holebegin) 1208 unmap_mapping_range(inode->i_mapping, 1209 
holebegin, 0, 1); 1210 } 1211 } 1212 1213 if (is_quota_modification(idmap, inode, attr)) { 1214 error = dquot_initialize(inode); 1215 if (error) 1216 return error; 1217 } 1218 1219 /* Transfer quota accounting */ 1220 if (i_uid_needs_update(idmap, attr, inode) || 1221 i_gid_needs_update(idmap, attr, inode)) { 1222 error = dquot_transfer(idmap, inode, attr); 1223 if (error) 1224 return error; 1225 } 1226 1227 setattr_copy(idmap, inode, attr); 1228 if (attr->ia_valid & ATTR_MODE) 1229 error = posix_acl_chmod(idmap, dentry, inode->i_mode); 1230 if (!error && update_ctime) { 1231 inode_set_ctime_current(inode); 1232 if (update_mtime) 1233 inode_set_mtime_to_ts(inode, inode_get_ctime(inode)); 1234 inode_inc_iversion(inode); 1235 } 1236 return error; 1237 } 1238 1239 static void shmem_evict_inode(struct inode *inode) 1240 { 1241 struct shmem_inode_info *info = SHMEM_I(inode); 1242 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1243 size_t freed = 0; 1244 1245 if (shmem_mapping(inode->i_mapping)) { 1246 shmem_unacct_size(info->flags, inode->i_size); 1247 inode->i_size = 0; 1248 mapping_set_exiting(inode->i_mapping); 1249 shmem_truncate_range(inode, 0, (loff_t)-1); 1250 if (!list_empty(&info->shrinklist)) { 1251 spin_lock(&sbinfo->shrinklist_lock); 1252 if (!list_empty(&info->shrinklist)) { 1253 list_del_init(&info->shrinklist); 1254 sbinfo->shrinklist_len--; 1255 } 1256 spin_unlock(&sbinfo->shrinklist_lock); 1257 } 1258 while (!list_empty(&info->swaplist)) { 1259 /* Wait while shmem_unuse() is scanning this inode... */ 1260 wait_var_event(&info->stop_eviction, 1261 !atomic_read(&info->stop_eviction)); 1262 mutex_lock(&shmem_swaplist_mutex); 1263 /* ...but beware of the race if we peeked too early */ 1264 if (!atomic_read(&info->stop_eviction)) 1265 list_del_init(&info->swaplist); 1266 mutex_unlock(&shmem_swaplist_mutex); 1267 } 1268 } 1269 1270 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL); 1271 shmem_free_inode(inode->i_sb, freed); 1272 WARN_ON(inode->i_blocks); 1273 clear_inode(inode); 1274 #ifdef CONFIG_TMPFS_QUOTA 1275 dquot_free_inode(inode); 1276 dquot_drop(inode); 1277 #endif 1278 } 1279 1280 static int shmem_find_swap_entries(struct address_space *mapping, 1281 pgoff_t start, struct folio_batch *fbatch, 1282 pgoff_t *indices, unsigned int type) 1283 { 1284 XA_STATE(xas, &mapping->i_pages, start); 1285 struct folio *folio; 1286 swp_entry_t entry; 1287 1288 rcu_read_lock(); 1289 xas_for_each(&xas, folio, ULONG_MAX) { 1290 if (xas_retry(&xas, folio)) 1291 continue; 1292 1293 if (!xa_is_value(folio)) 1294 continue; 1295 1296 entry = radix_to_swp_entry(folio); 1297 /* 1298 * swapin error entries can be found in the mapping. But they're 1299 * deliberately ignored here as we've done everything we can do. 1300 */ 1301 if (swp_type(entry) != type) 1302 continue; 1303 1304 indices[folio_batch_count(fbatch)] = xas.xa_index; 1305 if (!folio_batch_add(fbatch, folio)) 1306 break; 1307 1308 if (need_resched()) { 1309 xas_pause(&xas); 1310 cond_resched_rcu(); 1311 } 1312 } 1313 rcu_read_unlock(); 1314 1315 return xas.xa_index; 1316 } 1317 1318 /* 1319 * Move the swapped pages for an inode to page cache. Returns the count 1320 * of pages swapped in, or the error in case of failure. 
1321 */ 1322 static int shmem_unuse_swap_entries(struct inode *inode, 1323 struct folio_batch *fbatch, pgoff_t *indices) 1324 { 1325 int i = 0; 1326 int ret = 0; 1327 int error = 0; 1328 struct address_space *mapping = inode->i_mapping; 1329 1330 for (i = 0; i < folio_batch_count(fbatch); i++) { 1331 struct folio *folio = fbatch->folios[i]; 1332 1333 if (!xa_is_value(folio)) 1334 continue; 1335 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE, 1336 mapping_gfp_mask(mapping), NULL, NULL); 1337 if (error == 0) { 1338 folio_unlock(folio); 1339 folio_put(folio); 1340 ret++; 1341 } 1342 if (error == -ENOMEM) 1343 break; 1344 error = 0; 1345 } 1346 return error ? error : ret; 1347 } 1348 1349 /* 1350 * If swap found in inode, free it and move page from swapcache to filecache. 1351 */ 1352 static int shmem_unuse_inode(struct inode *inode, unsigned int type) 1353 { 1354 struct address_space *mapping = inode->i_mapping; 1355 pgoff_t start = 0; 1356 struct folio_batch fbatch; 1357 pgoff_t indices[PAGEVEC_SIZE]; 1358 int ret = 0; 1359 1360 do { 1361 folio_batch_init(&fbatch); 1362 shmem_find_swap_entries(mapping, start, &fbatch, indices, type); 1363 if (folio_batch_count(&fbatch) == 0) { 1364 ret = 0; 1365 break; 1366 } 1367 1368 ret = shmem_unuse_swap_entries(inode, &fbatch, indices); 1369 if (ret < 0) 1370 break; 1371 1372 start = indices[folio_batch_count(&fbatch) - 1]; 1373 } while (true); 1374 1375 return ret; 1376 } 1377 1378 /* 1379 * Read all the shared memory data that resides in the swap 1380 * device 'type' back into memory, so the swap device can be 1381 * unused. 1382 */ 1383 int shmem_unuse(unsigned int type) 1384 { 1385 struct shmem_inode_info *info, *next; 1386 int error = 0; 1387 1388 if (list_empty(&shmem_swaplist)) 1389 return 0; 1390 1391 mutex_lock(&shmem_swaplist_mutex); 1392 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1393 if (!info->swapped) { 1394 list_del_init(&info->swaplist); 1395 continue; 1396 } 1397 /* 1398 * Drop the swaplist mutex while searching the inode for swap; 1399 * but before doing so, make sure shmem_evict_inode() will not 1400 * remove placeholder inode from swaplist, nor let it be freed 1401 * (igrab() would protect from unlink, but not from unmount). 1402 */ 1403 atomic_inc(&info->stop_eviction); 1404 mutex_unlock(&shmem_swaplist_mutex); 1405 1406 error = shmem_unuse_inode(&info->vfs_inode, type); 1407 cond_resched(); 1408 1409 mutex_lock(&shmem_swaplist_mutex); 1410 next = list_next_entry(info, swaplist); 1411 if (!info->swapped) 1412 list_del_init(&info->swaplist); 1413 if (atomic_dec_and_test(&info->stop_eviction)) 1414 wake_up_var(&info->stop_eviction); 1415 if (error) 1416 break; 1417 } 1418 mutex_unlock(&shmem_swaplist_mutex); 1419 1420 return error; 1421 } 1422 1423 /* 1424 * Move the page from the page cache to the swap cache. 
1425 */ 1426 static int shmem_writepage(struct page *page, struct writeback_control *wbc) 1427 { 1428 struct folio *folio = page_folio(page); 1429 struct address_space *mapping = folio->mapping; 1430 struct inode *inode = mapping->host; 1431 struct shmem_inode_info *info = SHMEM_I(inode); 1432 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1433 swp_entry_t swap; 1434 pgoff_t index; 1435 1436 /* 1437 * Our capabilities prevent regular writeback or sync from ever calling 1438 * shmem_writepage; but a stacking filesystem might use ->writepage of 1439 * its underlying filesystem, in which case tmpfs should write out to 1440 * swap only in response to memory pressure, and not for the writeback 1441 * threads or sync. 1442 */ 1443 if (WARN_ON_ONCE(!wbc->for_reclaim)) 1444 goto redirty; 1445 1446 if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) 1447 goto redirty; 1448 1449 if (!total_swap_pages) 1450 goto redirty; 1451 1452 /* 1453 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or 1454 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, 1455 * and its shmem_writeback() needs them to be split when swapping. 1456 */ 1457 if (folio_test_large(folio)) { 1458 /* Ensure the subpages are still dirty */ 1459 folio_test_set_dirty(folio); 1460 if (split_huge_page(page) < 0) 1461 goto redirty; 1462 folio = page_folio(page); 1463 folio_clear_dirty(folio); 1464 } 1465 1466 index = folio->index; 1467 1468 /* 1469 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 1470 * value into swapfile.c, the only way we can correctly account for a 1471 * fallocated folio arriving here is now to initialize it and write it. 1472 * 1473 * That's okay for a folio already fallocated earlier, but if we have 1474 * not yet completed the fallocation, then (a) we want to keep track 1475 * of this folio in case we have to undo it, and (b) it may not be a 1476 * good idea to continue anyway, once we're pushing into swap. So 1477 * reactivate the folio, and let shmem_fallocate() quit when too many. 1478 */ 1479 if (!folio_test_uptodate(folio)) { 1480 if (inode->i_private) { 1481 struct shmem_falloc *shmem_falloc; 1482 spin_lock(&inode->i_lock); 1483 shmem_falloc = inode->i_private; 1484 if (shmem_falloc && 1485 !shmem_falloc->waitq && 1486 index >= shmem_falloc->start && 1487 index < shmem_falloc->next) 1488 shmem_falloc->nr_unswapped++; 1489 else 1490 shmem_falloc = NULL; 1491 spin_unlock(&inode->i_lock); 1492 if (shmem_falloc) 1493 goto redirty; 1494 } 1495 folio_zero_range(folio, 0, folio_size(folio)); 1496 flush_dcache_folio(folio); 1497 folio_mark_uptodate(folio); 1498 } 1499 1500 swap = folio_alloc_swap(folio); 1501 if (!swap.val) 1502 goto redirty; 1503 1504 /* 1505 * Add inode to shmem_unuse()'s list of swapped-out inodes, 1506 * if it's not already there. Do it now before the folio is 1507 * moved to swap cache, when its pagelock no longer protects 1508 * the inode from eviction. But don't unlock the mutex until 1509 * we've incremented swapped, because shmem_unuse_inode() will 1510 * prune a !swapped inode from the swaplist under this mutex. 
1511 */ 1512 mutex_lock(&shmem_swaplist_mutex); 1513 if (list_empty(&info->swaplist)) 1514 list_add(&info->swaplist, &shmem_swaplist); 1515 1516 if (add_to_swap_cache(folio, swap, 1517 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 1518 NULL) == 0) { 1519 shmem_recalc_inode(inode, 0, 1); 1520 swap_shmem_alloc(swap); 1521 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); 1522 1523 mutex_unlock(&shmem_swaplist_mutex); 1524 BUG_ON(folio_mapped(folio)); 1525 return swap_writepage(&folio->page, wbc); 1526 } 1527 1528 mutex_unlock(&shmem_swaplist_mutex); 1529 put_swap_folio(folio, swap); 1530 redirty: 1531 folio_mark_dirty(folio); 1532 if (wbc->for_reclaim) 1533 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */ 1534 folio_unlock(folio); 1535 return 0; 1536 } 1537 1538 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 1539 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1540 { 1541 char buffer[64]; 1542 1543 if (!mpol || mpol->mode == MPOL_DEFAULT) 1544 return; /* show nothing */ 1545 1546 mpol_to_str(buffer, sizeof(buffer), mpol); 1547 1548 seq_printf(seq, ",mpol=%s", buffer); 1549 } 1550 1551 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1552 { 1553 struct mempolicy *mpol = NULL; 1554 if (sbinfo->mpol) { 1555 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 1556 mpol = sbinfo->mpol; 1557 mpol_get(mpol); 1558 raw_spin_unlock(&sbinfo->stat_lock); 1559 } 1560 return mpol; 1561 } 1562 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1563 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1564 { 1565 } 1566 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1567 { 1568 return NULL; 1569 } 1570 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 1571 1572 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, 1573 pgoff_t index, unsigned int order, pgoff_t *ilx); 1574 1575 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp, 1576 struct shmem_inode_info *info, pgoff_t index) 1577 { 1578 struct mempolicy *mpol; 1579 pgoff_t ilx; 1580 struct folio *folio; 1581 1582 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx); 1583 folio = swap_cluster_readahead(swap, gfp, mpol, ilx); 1584 mpol_cond_put(mpol); 1585 1586 return folio; 1587 } 1588 1589 /* 1590 * Make sure huge_gfp is always more limited than limit_gfp. 1591 * Some of the flags set permissions, while others set limitations. 1592 */ 1593 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 1594 { 1595 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 1596 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1597 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1598 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1599 1600 /* Allow allocations only from the originally specified zones. */ 1601 result |= zoneflags; 1602 1603 /* 1604 * Minimize the result gfp by taking the union with the deny flags, 1605 * and the intersection of the allow flags. 
1606 */ 1607 result |= (limit_gfp & denyflags); 1608 result |= (huge_gfp & limit_gfp) & allowflags; 1609 1610 return result; 1611 } 1612 1613 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1614 static unsigned long shmem_allowable_huge_orders(struct inode *inode, 1615 struct vm_area_struct *vma, pgoff_t index, 1616 bool global_huge) 1617 { 1618 unsigned long mask = READ_ONCE(huge_shmem_orders_always); 1619 unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); 1620 unsigned long vm_flags = vma->vm_flags; 1621 /* 1622 * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that 1623 * are enabled for this vma. 1624 */ 1625 unsigned long orders = BIT(PMD_ORDER + 1) - 1; 1626 loff_t i_size; 1627 int order; 1628 1629 if ((vm_flags & VM_NOHUGEPAGE) || 1630 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 1631 return 0; 1632 1633 /* If the hardware/firmware marked hugepage support disabled. */ 1634 if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) 1635 return 0; 1636 1637 /* 1638 * Following the 'deny' semantics of the top level, force the huge 1639 * option off from all mounts. 1640 */ 1641 if (shmem_huge == SHMEM_HUGE_DENY) 1642 return 0; 1643 1644 /* 1645 * Only allow inherit orders if the top-level value is 'force', which 1646 * means non-PMD sized THP can not override 'huge' mount option now. 1647 */ 1648 if (shmem_huge == SHMEM_HUGE_FORCE) 1649 return READ_ONCE(huge_shmem_orders_inherit); 1650 1651 /* Allow mTHP that will be fully within i_size. */ 1652 order = highest_order(within_size_orders); 1653 while (within_size_orders) { 1654 index = round_up(index + 1, order); 1655 i_size = round_up(i_size_read(inode), PAGE_SIZE); 1656 if (i_size >> PAGE_SHIFT >= index) { 1657 mask |= within_size_orders; 1658 break; 1659 } 1660 1661 order = next_order(&within_size_orders, order); 1662 } 1663 1664 if (vm_flags & VM_HUGEPAGE) 1665 mask |= READ_ONCE(huge_shmem_orders_madvise); 1666 1667 if (global_huge) 1668 mask |= READ_ONCE(huge_shmem_orders_inherit); 1669 1670 return orders & mask; 1671 } 1672 1673 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, 1674 struct address_space *mapping, pgoff_t index, 1675 unsigned long orders) 1676 { 1677 struct vm_area_struct *vma = vmf->vma; 1678 unsigned long pages; 1679 int order; 1680 1681 orders = thp_vma_suitable_orders(vma, vmf->address, orders); 1682 if (!orders) 1683 return 0; 1684 1685 /* Find the highest order that can add into the page cache */ 1686 order = highest_order(orders); 1687 while (orders) { 1688 pages = 1UL << order; 1689 index = round_down(index, pages); 1690 if (!xa_find(&mapping->i_pages, &index, 1691 index + pages - 1, XA_PRESENT)) 1692 break; 1693 order = next_order(&orders, order); 1694 } 1695 1696 return orders; 1697 } 1698 #else 1699 static unsigned long shmem_allowable_huge_orders(struct inode *inode, 1700 struct vm_area_struct *vma, pgoff_t index, 1701 bool global_huge) 1702 { 1703 return 0; 1704 } 1705 1706 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, 1707 struct address_space *mapping, pgoff_t index, 1708 unsigned long orders) 1709 { 1710 return 0; 1711 } 1712 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1713 1714 static struct folio *shmem_alloc_folio(gfp_t gfp, int order, 1715 struct shmem_inode_info *info, pgoff_t index) 1716 { 1717 struct mempolicy *mpol; 1718 pgoff_t ilx; 1719 struct folio *folio; 1720 1721 mpol = shmem_get_pgoff_policy(info, index, order, &ilx); 1722 folio = folio_alloc_mpol(gfp, order, mpol, ilx, 
numa_node_id()); 1723 mpol_cond_put(mpol); 1724 1725 return folio; 1726 } 1727 1728 static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, 1729 gfp_t gfp, struct inode *inode, pgoff_t index, 1730 struct mm_struct *fault_mm, unsigned long orders) 1731 { 1732 struct address_space *mapping = inode->i_mapping; 1733 struct shmem_inode_info *info = SHMEM_I(inode); 1734 struct vm_area_struct *vma = vmf ? vmf->vma : NULL; 1735 unsigned long suitable_orders = 0; 1736 struct folio *folio = NULL; 1737 long pages; 1738 int error, order; 1739 1740 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1741 orders = 0; 1742 1743 if (orders > 0) { 1744 if (vma && vma_is_anon_shmem(vma)) { 1745 suitable_orders = shmem_suitable_orders(inode, vmf, 1746 mapping, index, orders); 1747 } else if (orders & BIT(HPAGE_PMD_ORDER)) { 1748 pages = HPAGE_PMD_NR; 1749 suitable_orders = BIT(HPAGE_PMD_ORDER); 1750 index = round_down(index, HPAGE_PMD_NR); 1751 1752 /* 1753 * Check for conflict before waiting on a huge allocation. 1754 * Conflict might be that a huge page has just been allocated 1755 * and added to page cache by a racing thread, or that there 1756 * is already at least one small page in the huge extent. 1757 * Be careful to retry when appropriate, but not forever! 1758 * Elsewhere -EEXIST would be the right code, but not here. 1759 */ 1760 if (xa_find(&mapping->i_pages, &index, 1761 index + HPAGE_PMD_NR - 1, XA_PRESENT)) 1762 return ERR_PTR(-E2BIG); 1763 } 1764 1765 order = highest_order(suitable_orders); 1766 while (suitable_orders) { 1767 pages = 1UL << order; 1768 index = round_down(index, pages); 1769 folio = shmem_alloc_folio(gfp, order, info, index); 1770 if (folio) 1771 goto allocated; 1772 1773 if (pages == HPAGE_PMD_NR) 1774 count_vm_event(THP_FILE_FALLBACK); 1775 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1776 count_mthp_stat(order, MTHP_STAT_FILE_FALLBACK); 1777 #endif 1778 order = next_order(&suitable_orders, order); 1779 } 1780 } else { 1781 pages = 1; 1782 folio = shmem_alloc_folio(gfp, 0, info, index); 1783 } 1784 if (!folio) 1785 return ERR_PTR(-ENOMEM); 1786 1787 allocated: 1788 __folio_set_locked(folio); 1789 __folio_set_swapbacked(folio); 1790 1791 gfp &= GFP_RECLAIM_MASK; 1792 error = mem_cgroup_charge(folio, fault_mm, gfp); 1793 if (error) { 1794 if (xa_find(&mapping->i_pages, &index, 1795 index + pages - 1, XA_PRESENT)) { 1796 error = -EEXIST; 1797 } else if (pages > 1) { 1798 if (pages == HPAGE_PMD_NR) { 1799 count_vm_event(THP_FILE_FALLBACK); 1800 count_vm_event(THP_FILE_FALLBACK_CHARGE); 1801 } 1802 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1803 count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK); 1804 count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK_CHARGE); 1805 #endif 1806 } 1807 goto unlock; 1808 } 1809 1810 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp); 1811 if (error) 1812 goto unlock; 1813 1814 error = shmem_inode_acct_blocks(inode, pages); 1815 if (error) { 1816 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1817 long freed; 1818 /* 1819 * Try to reclaim some space by splitting a few 1820 * large folios beyond i_size on the filesystem. 1821 */ 1822 shmem_unused_huge_shrink(sbinfo, NULL, 2); 1823 /* 1824 * And do a shmem_recalc_inode() to account for freed pages: 1825 * except our folio is there in cache, so not quite balanced. 
1826 */ 1827 spin_lock(&info->lock); 1828 freed = pages + info->alloced - info->swapped - 1829 READ_ONCE(mapping->nrpages); 1830 if (freed > 0) 1831 info->alloced -= freed; 1832 spin_unlock(&info->lock); 1833 if (freed > 0) 1834 shmem_inode_unacct_blocks(inode, freed); 1835 error = shmem_inode_acct_blocks(inode, pages); 1836 if (error) { 1837 filemap_remove_folio(folio); 1838 goto unlock; 1839 } 1840 } 1841 1842 shmem_recalc_inode(inode, pages, 0); 1843 folio_add_lru(folio); 1844 return folio; 1845 1846 unlock: 1847 folio_unlock(folio); 1848 folio_put(folio); 1849 return ERR_PTR(error); 1850 } 1851 1852 /* 1853 * When a page is moved from swapcache to shmem filecache (either by the 1854 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of 1855 * shmem_unuse_inode()), it may have been read in earlier from swap, in 1856 * ignorance of the mapping it belongs to. If that mapping has special 1857 * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1858 * we may need to copy to a suitable page before moving to filecache. 1859 * 1860 * In a future release, this may well be extended to respect cpuset and 1861 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1862 * but for now it is a simple matter of zone. 1863 */ 1864 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) 1865 { 1866 return folio_zonenum(folio) > gfp_zone(gfp); 1867 } 1868 1869 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, 1870 struct shmem_inode_info *info, pgoff_t index) 1871 { 1872 struct folio *old, *new; 1873 struct address_space *swap_mapping; 1874 swp_entry_t entry; 1875 pgoff_t swap_index; 1876 int error; 1877 1878 old = *foliop; 1879 entry = old->swap; 1880 swap_index = swap_cache_index(entry); 1881 swap_mapping = swap_address_space(entry); 1882 1883 /* 1884 * We have arrived here because our zones are constrained, so don't 1885 * limit chance of success by further cpuset and node constraints. 1886 */ 1887 gfp &= ~GFP_CONSTRAINT_MASK; 1888 VM_BUG_ON_FOLIO(folio_test_large(old), old); 1889 new = shmem_alloc_folio(gfp, 0, info, index); 1890 if (!new) 1891 return -ENOMEM; 1892 1893 folio_get(new); 1894 folio_copy(new, old); 1895 flush_dcache_folio(new); 1896 1897 __folio_set_locked(new); 1898 __folio_set_swapbacked(new); 1899 folio_mark_uptodate(new); 1900 new->swap = entry; 1901 folio_set_swapcache(new); 1902 1903 /* 1904 * Our caller will very soon move newpage out of swapcache, but it's 1905 * a nice clean interface for us to replace oldpage by newpage there. 1906 */ 1907 xa_lock_irq(&swap_mapping->i_pages); 1908 error = shmem_replace_entry(swap_mapping, swap_index, old, new); 1909 if (!error) { 1910 mem_cgroup_replace_folio(old, new); 1911 __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); 1912 __lruvec_stat_mod_folio(new, NR_SHMEM, 1); 1913 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); 1914 __lruvec_stat_mod_folio(old, NR_SHMEM, -1); 1915 } 1916 xa_unlock_irq(&swap_mapping->i_pages); 1917 1918 if (unlikely(error)) { 1919 /* 1920 * Is this possible? I think not, now that our callers check 1921 * both PageSwapCache and page_private after getting page lock; 1922 * but be defensive. Reverse old to newpage for clear and free. 
1923 */ 1924 old = new; 1925 } else { 1926 folio_add_lru(new); 1927 *foliop = new; 1928 } 1929 1930 folio_clear_swapcache(old); 1931 old->private = NULL; 1932 1933 folio_unlock(old); 1934 folio_put_refs(old, 2); 1935 return error; 1936 } 1937 1938 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, 1939 struct folio *folio, swp_entry_t swap) 1940 { 1941 struct address_space *mapping = inode->i_mapping; 1942 swp_entry_t swapin_error; 1943 void *old; 1944 1945 swapin_error = make_poisoned_swp_entry(); 1946 old = xa_cmpxchg_irq(&mapping->i_pages, index, 1947 swp_to_radix_entry(swap), 1948 swp_to_radix_entry(swapin_error), 0); 1949 if (old != swp_to_radix_entry(swap)) 1950 return; 1951 1952 folio_wait_writeback(folio); 1953 delete_from_swap_cache(folio); 1954 /* 1955 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks 1956 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks) 1957 * in shmem_evict_inode(). 1958 */ 1959 shmem_recalc_inode(inode, -1, -1); 1960 swap_free(swap); 1961 } 1962 1963 /* 1964 * Swap in the folio pointed to by *foliop. 1965 * Caller has to make sure that *foliop contains a valid swapped folio. 1966 * Returns 0 and the folio in foliop if success. On failure, returns the 1967 * error code and NULL in *foliop. 1968 */ 1969 static int shmem_swapin_folio(struct inode *inode, pgoff_t index, 1970 struct folio **foliop, enum sgp_type sgp, 1971 gfp_t gfp, struct mm_struct *fault_mm, 1972 vm_fault_t *fault_type) 1973 { 1974 struct address_space *mapping = inode->i_mapping; 1975 struct shmem_inode_info *info = SHMEM_I(inode); 1976 struct swap_info_struct *si; 1977 struct folio *folio = NULL; 1978 swp_entry_t swap; 1979 int error; 1980 1981 VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); 1982 swap = radix_to_swp_entry(*foliop); 1983 *foliop = NULL; 1984 1985 if (is_poisoned_swp_entry(swap)) 1986 return -EIO; 1987 1988 si = get_swap_device(swap); 1989 if (!si) { 1990 if (!shmem_confirm_swap(mapping, index, swap)) 1991 return -EEXIST; 1992 else 1993 return -EINVAL; 1994 } 1995 1996 /* Look it up and read it in.. */ 1997 folio = swap_cache_get_folio(swap, NULL, 0); 1998 if (!folio) { 1999 /* Or update major stats only when swapin succeeds?? */ 2000 if (fault_type) { 2001 *fault_type |= VM_FAULT_MAJOR; 2002 count_vm_event(PGMAJFAULT); 2003 count_memcg_event_mm(fault_mm, PGMAJFAULT); 2004 } 2005 /* Here we actually start the io */ 2006 folio = shmem_swapin_cluster(swap, gfp, info, index); 2007 if (!folio) { 2008 error = -ENOMEM; 2009 goto failed; 2010 } 2011 } 2012 2013 /* We have to do this with folio locked to prevent races */ 2014 folio_lock(folio); 2015 if (!folio_test_swapcache(folio) || 2016 folio->swap.val != swap.val || 2017 !shmem_confirm_swap(mapping, index, swap)) { 2018 error = -EEXIST; 2019 goto unlock; 2020 } 2021 if (!folio_test_uptodate(folio)) { 2022 error = -EIO; 2023 goto failed; 2024 } 2025 folio_wait_writeback(folio); 2026 2027 /* 2028 * Some architectures may have to restore extra metadata to the 2029 * folio after reading from swap. 
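 * (For example, arm64 uses this hook to restore MTE tags that were
 * saved when the folio was swapped out.)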
2030 */ 2031 arch_swap_restore(folio_swap(swap, folio), folio); 2032 2033 if (shmem_should_replace_folio(folio, gfp)) { 2034 error = shmem_replace_folio(&folio, gfp, info, index); 2035 if (error) 2036 goto failed; 2037 } 2038 2039 error = shmem_add_to_page_cache(folio, mapping, index, 2040 swp_to_radix_entry(swap), gfp); 2041 if (error) 2042 goto failed; 2043 2044 shmem_recalc_inode(inode, 0, -1); 2045 2046 if (sgp == SGP_WRITE) 2047 folio_mark_accessed(folio); 2048 2049 delete_from_swap_cache(folio); 2050 folio_mark_dirty(folio); 2051 swap_free(swap); 2052 put_swap_device(si); 2053 2054 *foliop = folio; 2055 return 0; 2056 failed: 2057 if (!shmem_confirm_swap(mapping, index, swap)) 2058 error = -EEXIST; 2059 if (error == -EIO) 2060 shmem_set_folio_swapin_error(inode, index, folio, swap); 2061 unlock: 2062 if (folio) { 2063 folio_unlock(folio); 2064 folio_put(folio); 2065 } 2066 put_swap_device(si); 2067 2068 return error; 2069 } 2070 2071 /* 2072 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate 2073 * 2074 * If we allocate a new one we do not mark it dirty. That's up to the 2075 * vm. If we swap it in we mark it dirty since we also free the swap 2076 * entry since a page cannot live in both the swap and page cache. 2077 * 2078 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL. 2079 */ 2080 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, 2081 struct folio **foliop, enum sgp_type sgp, gfp_t gfp, 2082 struct vm_fault *vmf, vm_fault_t *fault_type) 2083 { 2084 struct vm_area_struct *vma = vmf ? vmf->vma : NULL; 2085 struct mm_struct *fault_mm; 2086 struct folio *folio; 2087 int error; 2088 bool alloced, huge; 2089 unsigned long orders = 0; 2090 2091 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping))) 2092 return -EINVAL; 2093 2094 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 2095 return -EFBIG; 2096 repeat: 2097 if (sgp <= SGP_CACHE && 2098 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) 2099 return -EINVAL; 2100 2101 alloced = false; 2102 fault_mm = vma ? vma->vm_mm : NULL; 2103 2104 folio = filemap_get_entry(inode->i_mapping, index); 2105 if (folio && vma && userfaultfd_minor(vma)) { 2106 if (!xa_is_value(folio)) 2107 folio_put(folio); 2108 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 2109 return 0; 2110 } 2111 2112 if (xa_is_value(folio)) { 2113 error = shmem_swapin_folio(inode, index, &folio, 2114 sgp, gfp, fault_mm, fault_type); 2115 if (error == -EEXIST) 2116 goto repeat; 2117 2118 *foliop = folio; 2119 return error; 2120 } 2121 2122 if (folio) { 2123 folio_lock(folio); 2124 2125 /* Has the folio been truncated or swapped out? */ 2126 if (unlikely(folio->mapping != inode->i_mapping)) { 2127 folio_unlock(folio); 2128 folio_put(folio); 2129 goto repeat; 2130 } 2131 if (sgp == SGP_WRITE) 2132 folio_mark_accessed(folio); 2133 if (folio_test_uptodate(folio)) 2134 goto out; 2135 /* fallocated folio */ 2136 if (sgp != SGP_READ) 2137 goto clear; 2138 folio_unlock(folio); 2139 folio_put(folio); 2140 } 2141 2142 /* 2143 * SGP_READ: succeed on hole, with NULL folio, letting caller zero. 2144 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail. 2145 */ 2146 *foliop = NULL; 2147 if (sgp == SGP_READ) 2148 return 0; 2149 if (sgp == SGP_NOALLOC) 2150 return -ENOENT; 2151 2152 /* 2153 * Fast cache lookup and swap lookup did not find it: allocate. 
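 * Give userfaultfd a chance to serve the missing page first, then work
 * out which large folio orders are allowed at this index and try them,
 * falling back to a plain order-0 folio when no larger one can be
 * allocated.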
2154 */ 2155 2156 if (vma && userfaultfd_missing(vma)) { 2157 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 2158 return 0; 2159 } 2160 2161 huge = shmem_is_huge(inode, index, false, fault_mm, 2162 vma ? vma->vm_flags : 0); 2163 /* Find hugepage orders that are allowed for anonymous shmem. */ 2164 if (vma && vma_is_anon_shmem(vma)) 2165 orders = shmem_allowable_huge_orders(inode, vma, index, huge); 2166 else if (huge) 2167 orders = BIT(HPAGE_PMD_ORDER); 2168 2169 if (orders > 0) { 2170 gfp_t huge_gfp; 2171 2172 huge_gfp = vma_thp_gfp_mask(vma); 2173 huge_gfp = limit_gfp_mask(huge_gfp, gfp); 2174 folio = shmem_alloc_and_add_folio(vmf, huge_gfp, 2175 inode, index, fault_mm, orders); 2176 if (!IS_ERR(folio)) { 2177 if (folio_test_pmd_mappable(folio)) 2178 count_vm_event(THP_FILE_ALLOC); 2179 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2180 count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_ALLOC); 2181 #endif 2182 goto alloced; 2183 } 2184 if (PTR_ERR(folio) == -EEXIST) 2185 goto repeat; 2186 } 2187 2188 folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0); 2189 if (IS_ERR(folio)) { 2190 error = PTR_ERR(folio); 2191 if (error == -EEXIST) 2192 goto repeat; 2193 folio = NULL; 2194 goto unlock; 2195 } 2196 2197 alloced: 2198 alloced = true; 2199 if (folio_test_large(folio) && 2200 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 2201 folio_next_index(folio) - 1) { 2202 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 2203 struct shmem_inode_info *info = SHMEM_I(inode); 2204 /* 2205 * Part of the large folio is beyond i_size: subject 2206 * to shrink under memory pressure. 2207 */ 2208 spin_lock(&sbinfo->shrinklist_lock); 2209 /* 2210 * _careful to defend against unlocked access to 2211 * ->shrink_list in shmem_unused_huge_shrink() 2212 */ 2213 if (list_empty_careful(&info->shrinklist)) { 2214 list_add_tail(&info->shrinklist, 2215 &sbinfo->shrinklist); 2216 sbinfo->shrinklist_len++; 2217 } 2218 spin_unlock(&sbinfo->shrinklist_lock); 2219 } 2220 2221 if (sgp == SGP_WRITE) 2222 folio_set_referenced(folio); 2223 /* 2224 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. 2225 */ 2226 if (sgp == SGP_FALLOC) 2227 sgp = SGP_WRITE; 2228 clear: 2229 /* 2230 * Let SGP_WRITE caller clear ends if write does not fill folio; 2231 * but SGP_FALLOC on a folio fallocated earlier must initialize 2232 * it now, lest undo on failure cancel our earlier guarantee. 2233 */ 2234 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { 2235 long i, n = folio_nr_pages(folio); 2236 2237 for (i = 0; i < n; i++) 2238 clear_highpage(folio_page(folio, i)); 2239 flush_dcache_folio(folio); 2240 folio_mark_uptodate(folio); 2241 } 2242 2243 /* Perhaps the file has been truncated since we checked */ 2244 if (sgp <= SGP_CACHE && 2245 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2246 error = -EINVAL; 2247 goto unlock; 2248 } 2249 out: 2250 *foliop = folio; 2251 return 0; 2252 2253 /* 2254 * Error recovery. 2255 */ 2256 unlock: 2257 if (alloced) 2258 filemap_remove_folio(folio); 2259 shmem_recalc_inode(inode, 0, 0); 2260 if (folio) { 2261 folio_unlock(folio); 2262 folio_put(folio); 2263 } 2264 return error; 2265 } 2266 2267 /** 2268 * shmem_get_folio - find, and lock a shmem folio. 2269 * @inode: inode to search 2270 * @index: the page index. 2271 * @foliop: pointer to the folio if found 2272 * @sgp: SGP_* flags to control behavior 2273 * 2274 * Looks up the page cache entry at @inode & @index. If a folio is 2275 * present, it is returned locked with an increased refcount. 
2276 * 2277 * If the caller modifies data in the folio, it must call folio_mark_dirty() 2278 * before unlocking the folio to ensure that the folio is not reclaimed. 2279 * There is no need to reserve space before calling folio_mark_dirty(). 2280 * 2281 * When no folio is found, the behavior depends on @sgp: 2282 * - for SGP_READ, *@foliop is %NULL and 0 is returned 2283 * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned 2284 * - for all other flags a new folio is allocated, inserted into the 2285 * page cache and returned locked in @foliop. 2286 * 2287 * Context: May sleep. 2288 * Return: 0 if successful, else a negative error code. 2289 */ 2290 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, 2291 enum sgp_type sgp) 2292 { 2293 return shmem_get_folio_gfp(inode, index, foliop, sgp, 2294 mapping_gfp_mask(inode->i_mapping), NULL, NULL); 2295 } 2296 EXPORT_SYMBOL_GPL(shmem_get_folio); 2297 2298 /* 2299 * This is like autoremove_wake_function, but it removes the wait queue 2300 * entry unconditionally - even if something else had already woken the 2301 * target. 2302 */ 2303 static int synchronous_wake_function(wait_queue_entry_t *wait, 2304 unsigned int mode, int sync, void *key) 2305 { 2306 int ret = default_wake_function(wait, mode, sync, key); 2307 list_del_init(&wait->entry); 2308 return ret; 2309 } 2310 2311 /* 2312 * Trinity finds that probing a hole which tmpfs is punching can 2313 * prevent the hole-punch from ever completing: which in turn 2314 * locks writers out with its hold on i_rwsem. So refrain from 2315 * faulting pages into the hole while it's being punched. Although 2316 * shmem_undo_range() does remove the additions, it may be unable to 2317 * keep up, as each new page needs its own unmap_mapping_range() call, 2318 * and the i_mmap tree grows ever slower to scan if new vmas are added. 2319 * 2320 * It does not matter if we sometimes reach this check just before the 2321 * hole-punch begins, so that one fault then races with the punch: 2322 * we just need to make racing faults a rare case. 2323 * 2324 * The implementation below would be much simpler if we just used a 2325 * standard mutex or completion: but we cannot take i_rwsem in fault, 2326 * and bloating every shmem inode for this unlikely case would be sad. 2327 */ 2328 static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode) 2329 { 2330 struct shmem_falloc *shmem_falloc; 2331 struct file *fpin = NULL; 2332 vm_fault_t ret = 0; 2333 2334 spin_lock(&inode->i_lock); 2335 shmem_falloc = inode->i_private; 2336 if (shmem_falloc && 2337 shmem_falloc->waitq && 2338 vmf->pgoff >= shmem_falloc->start && 2339 vmf->pgoff < shmem_falloc->next) { 2340 wait_queue_head_t *shmem_falloc_waitq; 2341 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 2342 2343 ret = VM_FAULT_NOPAGE; 2344 fpin = maybe_unlock_mmap_for_io(vmf, NULL); 2345 shmem_falloc_waitq = shmem_falloc->waitq; 2346 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 2347 TASK_UNINTERRUPTIBLE); 2348 spin_unlock(&inode->i_lock); 2349 schedule(); 2350 2351 /* 2352 * shmem_falloc_waitq points into the shmem_fallocate() 2353 * stack of the hole-punching task: shmem_falloc_waitq 2354 * is usually invalid by the time we reach here, but 2355 * finish_wait() does not dereference it in that case; 2356 * though i_lock needed lest racing with wake_up_all(). 
2357 */ 2358 spin_lock(&inode->i_lock); 2359 finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 2360 } 2361 spin_unlock(&inode->i_lock); 2362 if (fpin) { 2363 fput(fpin); 2364 ret = VM_FAULT_RETRY; 2365 } 2366 return ret; 2367 } 2368 2369 static vm_fault_t shmem_fault(struct vm_fault *vmf) 2370 { 2371 struct inode *inode = file_inode(vmf->vma->vm_file); 2372 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2373 struct folio *folio = NULL; 2374 vm_fault_t ret = 0; 2375 int err; 2376 2377 /* 2378 * Trinity finds that probing a hole which tmpfs is punching can 2379 * prevent the hole-punch from ever completing: noted in i_private. 2380 */ 2381 if (unlikely(inode->i_private)) { 2382 ret = shmem_falloc_wait(vmf, inode); 2383 if (ret) 2384 return ret; 2385 } 2386 2387 WARN_ON_ONCE(vmf->page != NULL); 2388 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, 2389 gfp, vmf, &ret); 2390 if (err) 2391 return vmf_error(err); 2392 if (folio) { 2393 vmf->page = folio_file_page(folio, vmf->pgoff); 2394 ret |= VM_FAULT_LOCKED; 2395 } 2396 return ret; 2397 } 2398 2399 unsigned long shmem_get_unmapped_area(struct file *file, 2400 unsigned long uaddr, unsigned long len, 2401 unsigned long pgoff, unsigned long flags) 2402 { 2403 unsigned long addr; 2404 unsigned long offset; 2405 unsigned long inflated_len; 2406 unsigned long inflated_addr; 2407 unsigned long inflated_offset; 2408 unsigned long hpage_size; 2409 2410 if (len > TASK_SIZE) 2411 return -ENOMEM; 2412 2413 addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff, 2414 flags); 2415 2416 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2417 return addr; 2418 if (IS_ERR_VALUE(addr)) 2419 return addr; 2420 if (addr & ~PAGE_MASK) 2421 return addr; 2422 if (addr > TASK_SIZE - len) 2423 return addr; 2424 2425 if (shmem_huge == SHMEM_HUGE_DENY) 2426 return addr; 2427 if (flags & MAP_FIXED) 2428 return addr; 2429 /* 2430 * Our priority is to support MAP_SHARED mapped hugely; 2431 * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2432 * But if caller specified an address hint and we allocated area there 2433 * successfully, respect that as before. 2434 */ 2435 if (uaddr == addr) 2436 return addr; 2437 2438 hpage_size = HPAGE_PMD_SIZE; 2439 if (shmem_huge != SHMEM_HUGE_FORCE) { 2440 struct super_block *sb; 2441 unsigned long __maybe_unused hpage_orders; 2442 int order = 0; 2443 2444 if (file) { 2445 VM_BUG_ON(file->f_op != &shmem_file_operations); 2446 sb = file_inode(file)->i_sb; 2447 } else { 2448 /* 2449 * Called directly from mm/mmap.c, or drivers/char/mem.c 2450 * for "/dev/zero", to create a shared anonymous object. 2451 */ 2452 if (IS_ERR(shm_mnt)) 2453 return addr; 2454 sb = shm_mnt->mnt_sb; 2455 2456 /* 2457 * Find the highest mTHP order used for anonymous shmem to 2458 * provide a suitable alignment address. 
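 * The hpage_size chosen here sets how much the area request further
 * below is inflated (by hpage_size - PAGE_SIZE), so that a start with
 * the right offset inside a huge page can be carved out of it.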
2459 */ 2460 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 2461 hpage_orders = READ_ONCE(huge_shmem_orders_always); 2462 hpage_orders |= READ_ONCE(huge_shmem_orders_within_size); 2463 hpage_orders |= READ_ONCE(huge_shmem_orders_madvise); 2464 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) 2465 hpage_orders |= READ_ONCE(huge_shmem_orders_inherit); 2466 2467 if (hpage_orders > 0) { 2468 order = highest_order(hpage_orders); 2469 hpage_size = PAGE_SIZE << order; 2470 } 2471 #endif 2472 } 2473 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order) 2474 return addr; 2475 } 2476 2477 if (len < hpage_size) 2478 return addr; 2479 2480 offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1); 2481 if (offset && offset + len < 2 * hpage_size) 2482 return addr; 2483 if ((addr & (hpage_size - 1)) == offset) 2484 return addr; 2485 2486 inflated_len = len + hpage_size - PAGE_SIZE; 2487 if (inflated_len > TASK_SIZE) 2488 return addr; 2489 if (inflated_len < len) 2490 return addr; 2491 2492 inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr, 2493 inflated_len, 0, flags); 2494 if (IS_ERR_VALUE(inflated_addr)) 2495 return addr; 2496 if (inflated_addr & ~PAGE_MASK) 2497 return addr; 2498 2499 inflated_offset = inflated_addr & (hpage_size - 1); 2500 inflated_addr += offset - inflated_offset; 2501 if (inflated_offset > offset) 2502 inflated_addr += hpage_size; 2503 2504 if (inflated_addr > TASK_SIZE - len) 2505 return addr; 2506 return inflated_addr; 2507 } 2508 2509 #ifdef CONFIG_NUMA 2510 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 2511 { 2512 struct inode *inode = file_inode(vma->vm_file); 2513 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 2514 } 2515 2516 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2517 unsigned long addr, pgoff_t *ilx) 2518 { 2519 struct inode *inode = file_inode(vma->vm_file); 2520 pgoff_t index; 2521 2522 /* 2523 * Bias interleave by inode number to distribute better across nodes; 2524 * but this interface is independent of which page order is used, so 2525 * supplies only that bias, letting caller apply the offset (adjusted 2526 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()). 2527 */ 2528 *ilx = inode->i_ino; 2529 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2530 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 2531 } 2532 2533 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, 2534 pgoff_t index, unsigned int order, pgoff_t *ilx) 2535 { 2536 struct mempolicy *mpol; 2537 2538 /* Bias interleave by inode number to distribute better across nodes */ 2539 *ilx = info->vfs_inode.i_ino + (index >> order); 2540 2541 mpol = mpol_shared_policy_lookup(&info->policy, index); 2542 return mpol ? mpol : get_task_policy(current); 2543 } 2544 #else 2545 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, 2546 pgoff_t index, unsigned int order, pgoff_t *ilx) 2547 { 2548 *ilx = 0; 2549 return NULL; 2550 } 2551 #endif /* CONFIG_NUMA */ 2552 2553 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 2554 { 2555 struct inode *inode = file_inode(file); 2556 struct shmem_inode_info *info = SHMEM_I(inode); 2557 int retval = -ENOMEM; 2558 2559 /* 2560 * What serializes the accesses to info->flags? 2561 * ipc_lock_object() when called from shmctl_do_lock(), 2562 * no serialization needed when called from shm_destroy(). 
2563 */ 2564 if (lock && !(info->flags & VM_LOCKED)) { 2565 if (!user_shm_lock(inode->i_size, ucounts)) 2566 goto out_nomem; 2567 info->flags |= VM_LOCKED; 2568 mapping_set_unevictable(file->f_mapping); 2569 } 2570 if (!lock && (info->flags & VM_LOCKED) && ucounts) { 2571 user_shm_unlock(inode->i_size, ucounts); 2572 info->flags &= ~VM_LOCKED; 2573 mapping_clear_unevictable(file->f_mapping); 2574 } 2575 retval = 0; 2576 2577 out_nomem: 2578 return retval; 2579 } 2580 2581 static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 2582 { 2583 struct inode *inode = file_inode(file); 2584 struct shmem_inode_info *info = SHMEM_I(inode); 2585 int ret; 2586 2587 ret = seal_check_write(info->seals, vma); 2588 if (ret) 2589 return ret; 2590 2591 /* arm64 - allow memory tagging on RAM-based files */ 2592 vm_flags_set(vma, VM_MTE_ALLOWED); 2593 2594 file_accessed(file); 2595 /* This is anonymous shared memory if it is unlinked at the time of mmap */ 2596 if (inode->i_nlink) 2597 vma->vm_ops = &shmem_vm_ops; 2598 else 2599 vma->vm_ops = &shmem_anon_vm_ops; 2600 return 0; 2601 } 2602 2603 static int shmem_file_open(struct inode *inode, struct file *file) 2604 { 2605 file->f_mode |= FMODE_CAN_ODIRECT; 2606 return generic_file_open(inode, file); 2607 } 2608 2609 #ifdef CONFIG_TMPFS_XATTR 2610 static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 2611 2612 /* 2613 * chattr's fsflags are unrelated to extended attributes, 2614 * but tmpfs has chosen to enable them under the same config option. 2615 */ 2616 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2617 { 2618 unsigned int i_flags = 0; 2619 2620 if (fsflags & FS_NOATIME_FL) 2621 i_flags |= S_NOATIME; 2622 if (fsflags & FS_APPEND_FL) 2623 i_flags |= S_APPEND; 2624 if (fsflags & FS_IMMUTABLE_FL) 2625 i_flags |= S_IMMUTABLE; 2626 /* 2627 * But FS_NODUMP_FL does not require any action in i_flags. 2628 */ 2629 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE); 2630 } 2631 #else 2632 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2633 { 2634 } 2635 #define shmem_initxattrs NULL 2636 #endif 2637 2638 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode) 2639 { 2640 return &SHMEM_I(inode)->dir_offsets; 2641 } 2642 2643 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, 2644 struct super_block *sb, 2645 struct inode *dir, umode_t mode, 2646 dev_t dev, unsigned long flags) 2647 { 2648 struct inode *inode; 2649 struct shmem_inode_info *info; 2650 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2651 ino_t ino; 2652 int err; 2653 2654 err = shmem_reserve_inode(sb, &ino); 2655 if (err) 2656 return ERR_PTR(err); 2657 2658 inode = new_inode(sb); 2659 if (!inode) { 2660 shmem_free_inode(sb, 0); 2661 return ERR_PTR(-ENOSPC); 2662 } 2663 2664 inode->i_ino = ino; 2665 inode_init_owner(idmap, inode, dir, mode); 2666 inode->i_blocks = 0; 2667 simple_inode_init_ts(inode); 2668 inode->i_generation = get_random_u32(); 2669 info = SHMEM_I(inode); 2670 memset(info, 0, (char *)inode - (char *)info); 2671 spin_lock_init(&info->lock); 2672 atomic_set(&info->stop_eviction, 0); 2673 info->seals = F_SEAL_SEAL; 2674 info->flags = flags & VM_NORESERVE; 2675 info->i_crtime = inode_get_mtime(inode); 2676 info->fsflags = (dir == NULL) ? 
0 : 2677 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED; 2678 if (info->fsflags) 2679 shmem_set_inode_flags(inode, info->fsflags); 2680 INIT_LIST_HEAD(&info->shrinklist); 2681 INIT_LIST_HEAD(&info->swaplist); 2682 simple_xattrs_init(&info->xattrs); 2683 cache_no_acl(inode); 2684 if (sbinfo->noswap) 2685 mapping_set_unevictable(inode->i_mapping); 2686 mapping_set_large_folios(inode->i_mapping); 2687 2688 switch (mode & S_IFMT) { 2689 default: 2690 inode->i_op = &shmem_special_inode_operations; 2691 init_special_inode(inode, mode, dev); 2692 break; 2693 case S_IFREG: 2694 inode->i_mapping->a_ops = &shmem_aops; 2695 inode->i_op = &shmem_inode_operations; 2696 inode->i_fop = &shmem_file_operations; 2697 mpol_shared_policy_init(&info->policy, 2698 shmem_get_sbmpol(sbinfo)); 2699 break; 2700 case S_IFDIR: 2701 inc_nlink(inode); 2702 /* Some things misbehave if size == 0 on a directory */ 2703 inode->i_size = 2 * BOGO_DIRENT_SIZE; 2704 inode->i_op = &shmem_dir_inode_operations; 2705 inode->i_fop = &simple_offset_dir_operations; 2706 simple_offset_init(shmem_get_offset_ctx(inode)); 2707 break; 2708 case S_IFLNK: 2709 /* 2710 * Must not load anything in the rbtree, 2711 * mpol_free_shared_policy will not be called. 2712 */ 2713 mpol_shared_policy_init(&info->policy, NULL); 2714 break; 2715 } 2716 2717 lockdep_annotate_inode_mutex_key(inode); 2718 return inode; 2719 } 2720 2721 #ifdef CONFIG_TMPFS_QUOTA 2722 static struct inode *shmem_get_inode(struct mnt_idmap *idmap, 2723 struct super_block *sb, struct inode *dir, 2724 umode_t mode, dev_t dev, unsigned long flags) 2725 { 2726 int err; 2727 struct inode *inode; 2728 2729 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags); 2730 if (IS_ERR(inode)) 2731 return inode; 2732 2733 err = dquot_initialize(inode); 2734 if (err) 2735 goto errout; 2736 2737 err = dquot_alloc_inode(inode); 2738 if (err) { 2739 dquot_drop(inode); 2740 goto errout; 2741 } 2742 return inode; 2743 2744 errout: 2745 inode->i_flags |= S_NOQUOTA; 2746 iput(inode); 2747 return ERR_PTR(err); 2748 } 2749 #else 2750 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, 2751 struct super_block *sb, struct inode *dir, 2752 umode_t mode, dev_t dev, unsigned long flags) 2753 { 2754 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags); 2755 } 2756 #endif /* CONFIG_TMPFS_QUOTA */ 2757 2758 #ifdef CONFIG_USERFAULTFD 2759 int shmem_mfill_atomic_pte(pmd_t *dst_pmd, 2760 struct vm_area_struct *dst_vma, 2761 unsigned long dst_addr, 2762 unsigned long src_addr, 2763 uffd_flags_t flags, 2764 struct folio **foliop) 2765 { 2766 struct inode *inode = file_inode(dst_vma->vm_file); 2767 struct shmem_inode_info *info = SHMEM_I(inode); 2768 struct address_space *mapping = inode->i_mapping; 2769 gfp_t gfp = mapping_gfp_mask(mapping); 2770 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 2771 void *page_kaddr; 2772 struct folio *folio; 2773 int ret; 2774 pgoff_t max_off; 2775 2776 if (shmem_inode_acct_blocks(inode, 1)) { 2777 /* 2778 * We may have got a page, returned -ENOENT triggering a retry, 2779 * and now we find ourselves with -ENOMEM. Release the page, to 2780 * avoid a BUG_ON in our caller. 
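 * (The -ENOENT protocol: if copy_from_user() below would fault, the
 * freshly allocated folio is handed back through *foliop and the
 * caller retries the copy outside the mmap_lock.)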
2781 */ 2782 if (unlikely(*foliop)) { 2783 folio_put(*foliop); 2784 *foliop = NULL; 2785 } 2786 return -ENOMEM; 2787 } 2788 2789 if (!*foliop) { 2790 ret = -ENOMEM; 2791 folio = shmem_alloc_folio(gfp, 0, info, pgoff); 2792 if (!folio) 2793 goto out_unacct_blocks; 2794 2795 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) { 2796 page_kaddr = kmap_local_folio(folio, 0); 2797 /* 2798 * The read mmap_lock is held here. Despite the 2799 * mmap_lock being read recursive a deadlock is still 2800 * possible if a writer has taken a lock. For example: 2801 * 2802 * process A thread 1 takes read lock on own mmap_lock 2803 * process A thread 2 calls mmap, blocks taking write lock 2804 * process B thread 1 takes page fault, read lock on own mmap lock 2805 * process B thread 2 calls mmap, blocks taking write lock 2806 * process A thread 1 blocks taking read lock on process B 2807 * process B thread 1 blocks taking read lock on process A 2808 * 2809 * Disable page faults to prevent potential deadlock 2810 * and retry the copy outside the mmap_lock. 2811 */ 2812 pagefault_disable(); 2813 ret = copy_from_user(page_kaddr, 2814 (const void __user *)src_addr, 2815 PAGE_SIZE); 2816 pagefault_enable(); 2817 kunmap_local(page_kaddr); 2818 2819 /* fallback to copy_from_user outside mmap_lock */ 2820 if (unlikely(ret)) { 2821 *foliop = folio; 2822 ret = -ENOENT; 2823 /* don't free the page */ 2824 goto out_unacct_blocks; 2825 } 2826 2827 flush_dcache_folio(folio); 2828 } else { /* ZEROPAGE */ 2829 clear_user_highpage(&folio->page, dst_addr); 2830 } 2831 } else { 2832 folio = *foliop; 2833 VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 2834 *foliop = NULL; 2835 } 2836 2837 VM_BUG_ON(folio_test_locked(folio)); 2838 VM_BUG_ON(folio_test_swapbacked(folio)); 2839 __folio_set_locked(folio); 2840 __folio_set_swapbacked(folio); 2841 __folio_mark_uptodate(folio); 2842 2843 ret = -EFAULT; 2844 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2845 if (unlikely(pgoff >= max_off)) 2846 goto out_release; 2847 2848 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp); 2849 if (ret) 2850 goto out_release; 2851 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp); 2852 if (ret) 2853 goto out_release; 2854 2855 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, 2856 &folio->page, true, flags); 2857 if (ret) 2858 goto out_delete_from_cache; 2859 2860 shmem_recalc_inode(inode, 1, 0); 2861 folio_unlock(folio); 2862 return 0; 2863 out_delete_from_cache: 2864 filemap_remove_folio(folio); 2865 out_release: 2866 folio_unlock(folio); 2867 folio_put(folio); 2868 out_unacct_blocks: 2869 shmem_inode_unacct_blocks(inode, 1); 2870 return ret; 2871 } 2872 #endif /* CONFIG_USERFAULTFD */ 2873 2874 #ifdef CONFIG_TMPFS 2875 static const struct inode_operations shmem_symlink_inode_operations; 2876 static const struct inode_operations shmem_short_symlink_operations; 2877 2878 static int 2879 shmem_write_begin(struct file *file, struct address_space *mapping, 2880 loff_t pos, unsigned len, 2881 struct page **pagep, void **fsdata) 2882 { 2883 struct inode *inode = mapping->host; 2884 struct shmem_inode_info *info = SHMEM_I(inode); 2885 pgoff_t index = pos >> PAGE_SHIFT; 2886 struct folio *folio; 2887 int ret = 0; 2888 2889 /* i_rwsem is held by caller */ 2890 if (unlikely(info->seals & (F_SEAL_GROW | 2891 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 2892 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 2893 return -EPERM; 2894 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 2895 return -EPERM; 2896 } 2897 2898 
ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); 2899 if (ret) 2900 return ret; 2901 2902 *pagep = folio_file_page(folio, index); 2903 if (PageHWPoison(*pagep)) { 2904 folio_unlock(folio); 2905 folio_put(folio); 2906 *pagep = NULL; 2907 return -EIO; 2908 } 2909 2910 return 0; 2911 } 2912 2913 static int 2914 shmem_write_end(struct file *file, struct address_space *mapping, 2915 loff_t pos, unsigned len, unsigned copied, 2916 struct page *page, void *fsdata) 2917 { 2918 struct folio *folio = page_folio(page); 2919 struct inode *inode = mapping->host; 2920 2921 if (pos + copied > inode->i_size) 2922 i_size_write(inode, pos + copied); 2923 2924 if (!folio_test_uptodate(folio)) { 2925 if (copied < folio_size(folio)) { 2926 size_t from = offset_in_folio(folio, pos); 2927 folio_zero_segments(folio, 0, from, 2928 from + copied, folio_size(folio)); 2929 } 2930 folio_mark_uptodate(folio); 2931 } 2932 folio_mark_dirty(folio); 2933 folio_unlock(folio); 2934 folio_put(folio); 2935 2936 return copied; 2937 } 2938 2939 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 2940 { 2941 struct file *file = iocb->ki_filp; 2942 struct inode *inode = file_inode(file); 2943 struct address_space *mapping = inode->i_mapping; 2944 pgoff_t index; 2945 unsigned long offset; 2946 int error = 0; 2947 ssize_t retval = 0; 2948 loff_t *ppos = &iocb->ki_pos; 2949 2950 index = *ppos >> PAGE_SHIFT; 2951 offset = *ppos & ~PAGE_MASK; 2952 2953 for (;;) { 2954 struct folio *folio = NULL; 2955 struct page *page = NULL; 2956 pgoff_t end_index; 2957 unsigned long nr, ret; 2958 loff_t i_size = i_size_read(inode); 2959 2960 end_index = i_size >> PAGE_SHIFT; 2961 if (index > end_index) 2962 break; 2963 if (index == end_index) { 2964 nr = i_size & ~PAGE_MASK; 2965 if (nr <= offset) 2966 break; 2967 } 2968 2969 error = shmem_get_folio(inode, index, &folio, SGP_READ); 2970 if (error) { 2971 if (error == -EINVAL) 2972 error = 0; 2973 break; 2974 } 2975 if (folio) { 2976 folio_unlock(folio); 2977 2978 page = folio_file_page(folio, index); 2979 if (PageHWPoison(page)) { 2980 folio_put(folio); 2981 error = -EIO; 2982 break; 2983 } 2984 } 2985 2986 /* 2987 * We must evaluate after, since reads (unlike writes) 2988 * are called without i_rwsem protection against truncate 2989 */ 2990 nr = PAGE_SIZE; 2991 i_size = i_size_read(inode); 2992 end_index = i_size >> PAGE_SHIFT; 2993 if (index == end_index) { 2994 nr = i_size & ~PAGE_MASK; 2995 if (nr <= offset) { 2996 if (folio) 2997 folio_put(folio); 2998 break; 2999 } 3000 } 3001 nr -= offset; 3002 3003 if (folio) { 3004 /* 3005 * If users can be writing to this page using arbitrary 3006 * virtual addresses, take care about potential aliasing 3007 * before reading the page on the kernel side. 3008 */ 3009 if (mapping_writably_mapped(mapping)) 3010 flush_dcache_page(page); 3011 /* 3012 * Mark the page accessed if we read the beginning. 3013 */ 3014 if (!offset) 3015 folio_mark_accessed(folio); 3016 /* 3017 * Ok, we have the page, and it's up-to-date, so 3018 * now we can copy it to user space... 3019 */ 3020 ret = copy_page_to_iter(page, offset, nr, to); 3021 folio_put(folio); 3022 3023 } else if (user_backed_iter(to)) { 3024 /* 3025 * Copy to user tends to be so well optimized, but 3026 * clear_user() not so much, that it is noticeably 3027 * faster to copy the zero page instead of clearing. 3028 */ 3029 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to); 3030 } else { 3031 /* 3032 * But submitting the same page twice in a row to 3033 * splice() - or others? 
- can result in confusion: 3034 * so don't attempt that optimization on pipes etc. 3035 */ 3036 ret = iov_iter_zero(nr, to); 3037 } 3038 3039 retval += ret; 3040 offset += ret; 3041 index += offset >> PAGE_SHIFT; 3042 offset &= ~PAGE_MASK; 3043 3044 if (!iov_iter_count(to)) 3045 break; 3046 if (ret < nr) { 3047 error = -EFAULT; 3048 break; 3049 } 3050 cond_resched(); 3051 } 3052 3053 *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 3054 file_accessed(file); 3055 return retval ? retval : error; 3056 } 3057 3058 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 3059 { 3060 struct file *file = iocb->ki_filp; 3061 struct inode *inode = file->f_mapping->host; 3062 ssize_t ret; 3063 3064 inode_lock(inode); 3065 ret = generic_write_checks(iocb, from); 3066 if (ret <= 0) 3067 goto unlock; 3068 ret = file_remove_privs(file); 3069 if (ret) 3070 goto unlock; 3071 ret = file_update_time(file); 3072 if (ret) 3073 goto unlock; 3074 ret = generic_perform_write(iocb, from); 3075 unlock: 3076 inode_unlock(inode); 3077 return ret; 3078 } 3079 3080 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe, 3081 struct pipe_buffer *buf) 3082 { 3083 return true; 3084 } 3085 3086 static void zero_pipe_buf_release(struct pipe_inode_info *pipe, 3087 struct pipe_buffer *buf) 3088 { 3089 } 3090 3091 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe, 3092 struct pipe_buffer *buf) 3093 { 3094 return false; 3095 } 3096 3097 static const struct pipe_buf_operations zero_pipe_buf_ops = { 3098 .release = zero_pipe_buf_release, 3099 .try_steal = zero_pipe_buf_try_steal, 3100 .get = zero_pipe_buf_get, 3101 }; 3102 3103 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe, 3104 loff_t fpos, size_t size) 3105 { 3106 size_t offset = fpos & ~PAGE_MASK; 3107 3108 size = min_t(size_t, size, PAGE_SIZE - offset); 3109 3110 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { 3111 struct pipe_buffer *buf = pipe_head_buf(pipe); 3112 3113 *buf = (struct pipe_buffer) { 3114 .ops = &zero_pipe_buf_ops, 3115 .page = ZERO_PAGE(0), 3116 .offset = offset, 3117 .len = size, 3118 }; 3119 pipe->head++; 3120 } 3121 3122 return size; 3123 } 3124 3125 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, 3126 struct pipe_inode_info *pipe, 3127 size_t len, unsigned int flags) 3128 { 3129 struct inode *inode = file_inode(in); 3130 struct address_space *mapping = inode->i_mapping; 3131 struct folio *folio = NULL; 3132 size_t total_spliced = 0, used, npages, n, part; 3133 loff_t isize; 3134 int error = 0; 3135 3136 /* Work out how much data we can actually add into the pipe */ 3137 used = pipe_occupancy(pipe->head, pipe->tail); 3138 npages = max_t(ssize_t, pipe->max_usage - used, 0); 3139 len = min_t(size_t, len, npages * PAGE_SIZE); 3140 3141 do { 3142 if (*ppos >= i_size_read(inode)) 3143 break; 3144 3145 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, 3146 SGP_READ); 3147 if (error) { 3148 if (error == -EINVAL) 3149 error = 0; 3150 break; 3151 } 3152 if (folio) { 3153 folio_unlock(folio); 3154 3155 if (folio_test_hwpoison(folio) || 3156 (folio_test_large(folio) && 3157 folio_test_has_hwpoisoned(folio))) { 3158 error = -EIO; 3159 break; 3160 } 3161 } 3162 3163 /* 3164 * i_size must be checked after we know the pages are Uptodate. 
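 * (shmem_get_folio() above has either returned an uptodate folio
 * or NULL over a hole.)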
3165 * 3166 * Checking i_size after the check allows us to calculate 3167 * the correct value for "nr", which means the zero-filled 3168 * part of the page is not copied back to userspace (unless 3169 * another truncate extends the file - this is desired though). 3170 */ 3171 isize = i_size_read(inode); 3172 if (unlikely(*ppos >= isize)) 3173 break; 3174 part = min_t(loff_t, isize - *ppos, len); 3175 3176 if (folio) { 3177 /* 3178 * If users can be writing to this page using arbitrary 3179 * virtual addresses, take care about potential aliasing 3180 * before reading the page on the kernel side. 3181 */ 3182 if (mapping_writably_mapped(mapping)) 3183 flush_dcache_folio(folio); 3184 folio_mark_accessed(folio); 3185 /* 3186 * Ok, we have the page, and it's up-to-date, so we can 3187 * now splice it into the pipe. 3188 */ 3189 n = splice_folio_into_pipe(pipe, folio, *ppos, part); 3190 folio_put(folio); 3191 folio = NULL; 3192 } else { 3193 n = splice_zeropage_into_pipe(pipe, *ppos, part); 3194 } 3195 3196 if (!n) 3197 break; 3198 len -= n; 3199 total_spliced += n; 3200 *ppos += n; 3201 in->f_ra.prev_pos = *ppos; 3202 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 3203 break; 3204 3205 cond_resched(); 3206 } while (len); 3207 3208 if (folio) 3209 folio_put(folio); 3210 3211 file_accessed(in); 3212 return total_spliced ? total_spliced : error; 3213 } 3214 3215 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 3216 { 3217 struct address_space *mapping = file->f_mapping; 3218 struct inode *inode = mapping->host; 3219 3220 if (whence != SEEK_DATA && whence != SEEK_HOLE) 3221 return generic_file_llseek_size(file, offset, whence, 3222 MAX_LFS_FILESIZE, i_size_read(inode)); 3223 if (offset < 0) 3224 return -ENXIO; 3225 3226 inode_lock(inode); 3227 /* We're holding i_rwsem so we can access i_size directly */ 3228 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); 3229 if (offset >= 0) 3230 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 3231 inode_unlock(inode); 3232 return offset; 3233 } 3234 3235 static long shmem_fallocate(struct file *file, int mode, loff_t offset, 3236 loff_t len) 3237 { 3238 struct inode *inode = file_inode(file); 3239 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3240 struct shmem_inode_info *info = SHMEM_I(inode); 3241 struct shmem_falloc shmem_falloc; 3242 pgoff_t start, index, end, undo_fallocend; 3243 int error; 3244 3245 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 3246 return -EOPNOTSUPP; 3247 3248 inode_lock(inode); 3249 3250 if (mode & FALLOC_FL_PUNCH_HOLE) { 3251 struct address_space *mapping = file->f_mapping; 3252 loff_t unmap_start = round_up(offset, PAGE_SIZE); 3253 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 3254 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 3255 3256 /* protected by i_rwsem */ 3257 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 3258 error = -EPERM; 3259 goto out; 3260 } 3261 3262 shmem_falloc.waitq = &shmem_falloc_waitq; 3263 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 3264 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 3265 spin_lock(&inode->i_lock); 3266 inode->i_private = &shmem_falloc; 3267 spin_unlock(&inode->i_lock); 3268 3269 if ((u64)unmap_end > (u64)unmap_start) 3270 unmap_mapping_range(mapping, unmap_start, 3271 1 + unmap_end - unmap_start, 0); 3272 shmem_truncate_range(inode, offset, offset + len - 1); 3273 /* No need to unmap again: hole-punching leaves COWed pages */ 3274 3275 spin_lock(&inode->i_lock); 
3276 inode->i_private = NULL; 3277 wake_up_all(&shmem_falloc_waitq); 3278 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 3279 spin_unlock(&inode->i_lock); 3280 error = 0; 3281 goto out; 3282 } 3283 3284 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 3285 error = inode_newsize_ok(inode, offset + len); 3286 if (error) 3287 goto out; 3288 3289 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 3290 error = -EPERM; 3291 goto out; 3292 } 3293 3294 start = offset >> PAGE_SHIFT; 3295 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 3296 /* Try to avoid a swapstorm if len is impossible to satisfy */ 3297 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 3298 error = -ENOSPC; 3299 goto out; 3300 } 3301 3302 shmem_falloc.waitq = NULL; 3303 shmem_falloc.start = start; 3304 shmem_falloc.next = start; 3305 shmem_falloc.nr_falloced = 0; 3306 shmem_falloc.nr_unswapped = 0; 3307 spin_lock(&inode->i_lock); 3308 inode->i_private = &shmem_falloc; 3309 spin_unlock(&inode->i_lock); 3310 3311 /* 3312 * info->fallocend is only relevant when huge pages might be 3313 * involved: to prevent split_huge_page() freeing fallocated 3314 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. 3315 */ 3316 undo_fallocend = info->fallocend; 3317 if (info->fallocend < end) 3318 info->fallocend = end; 3319 3320 for (index = start; index < end; ) { 3321 struct folio *folio; 3322 3323 /* 3324 * Good, the fallocate(2) manpage permits EINTR: we may have 3325 * been interrupted because we are using up too much memory. 3326 */ 3327 if (signal_pending(current)) 3328 error = -EINTR; 3329 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 3330 error = -ENOMEM; 3331 else 3332 error = shmem_get_folio(inode, index, &folio, 3333 SGP_FALLOC); 3334 if (error) { 3335 info->fallocend = undo_fallocend; 3336 /* Remove the !uptodate folios we added */ 3337 if (index > start) { 3338 shmem_undo_range(inode, 3339 (loff_t)start << PAGE_SHIFT, 3340 ((loff_t)index << PAGE_SHIFT) - 1, true); 3341 } 3342 goto undone; 3343 } 3344 3345 /* 3346 * Here is a more important optimization than it appears: 3347 * a second SGP_FALLOC on the same large folio will clear it, 3348 * making it uptodate and un-undoable if we fail later. 3349 */ 3350 index = folio_next_index(folio); 3351 /* Beware 32-bit wraparound */ 3352 if (!index) 3353 index--; 3354 3355 /* 3356 * Inform shmem_writepage() how far we have reached. 3357 * No need for lock or barrier: we have the page lock. 3358 */ 3359 if (!folio_test_uptodate(folio)) 3360 shmem_falloc.nr_falloced += index - shmem_falloc.next; 3361 shmem_falloc.next = index; 3362 3363 /* 3364 * If !uptodate, leave it that way so that freeable folios 3365 * can be recognized if we need to rollback on error later. 3366 * But mark it dirty so that memory pressure will swap rather 3367 * than free the folios we are allocating (and SGP_CACHE folios 3368 * might still be clean: we now need to mark those dirty too). 
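 * The folio comes back locked from shmem_get_folio(): drop the
 * lock and reference below once it has been dirtied, and give
 * other work a chance to run between iterations.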
3369 */ 3370 folio_mark_dirty(folio); 3371 folio_unlock(folio); 3372 folio_put(folio); 3373 cond_resched(); 3374 } 3375 3376 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 3377 i_size_write(inode, offset + len); 3378 undone: 3379 spin_lock(&inode->i_lock); 3380 inode->i_private = NULL; 3381 spin_unlock(&inode->i_lock); 3382 out: 3383 if (!error) 3384 file_modified(file); 3385 inode_unlock(inode); 3386 return error; 3387 } 3388 3389 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 3390 { 3391 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 3392 3393 buf->f_type = TMPFS_MAGIC; 3394 buf->f_bsize = PAGE_SIZE; 3395 buf->f_namelen = NAME_MAX; 3396 if (sbinfo->max_blocks) { 3397 buf->f_blocks = sbinfo->max_blocks; 3398 buf->f_bavail = 3399 buf->f_bfree = sbinfo->max_blocks - 3400 percpu_counter_sum(&sbinfo->used_blocks); 3401 } 3402 if (sbinfo->max_inodes) { 3403 buf->f_files = sbinfo->max_inodes; 3404 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE; 3405 } 3406 /* else leave those fields 0 like simple_statfs */ 3407 3408 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); 3409 3410 return 0; 3411 } 3412 3413 /* 3414 * File creation. Allocate an inode, and we're done.. 3415 */ 3416 static int 3417 shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, 3418 struct dentry *dentry, umode_t mode, dev_t dev) 3419 { 3420 struct inode *inode; 3421 int error; 3422 3423 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); 3424 if (IS_ERR(inode)) 3425 return PTR_ERR(inode); 3426 3427 error = simple_acl_create(dir, inode); 3428 if (error) 3429 goto out_iput; 3430 error = security_inode_init_security(inode, dir, &dentry->d_name, 3431 shmem_initxattrs, NULL); 3432 if (error && error != -EOPNOTSUPP) 3433 goto out_iput; 3434 3435 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); 3436 if (error) 3437 goto out_iput; 3438 3439 dir->i_size += BOGO_DIRENT_SIZE; 3440 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 3441 inode_inc_iversion(dir); 3442 d_instantiate(dentry, inode); 3443 dget(dentry); /* Extra count - pin the dentry in core */ 3444 return error; 3445 3446 out_iput: 3447 iput(inode); 3448 return error; 3449 } 3450 3451 static int 3452 shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, 3453 struct file *file, umode_t mode) 3454 { 3455 struct inode *inode; 3456 int error; 3457 3458 inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); 3459 if (IS_ERR(inode)) { 3460 error = PTR_ERR(inode); 3461 goto err_out; 3462 } 3463 error = security_inode_init_security(inode, dir, NULL, 3464 shmem_initxattrs, NULL); 3465 if (error && error != -EOPNOTSUPP) 3466 goto out_iput; 3467 error = simple_acl_create(dir, inode); 3468 if (error) 3469 goto out_iput; 3470 d_tmpfile(file, inode); 3471 3472 err_out: 3473 return finish_open_simple(file, error); 3474 out_iput: 3475 iput(inode); 3476 return error; 3477 } 3478 3479 static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir, 3480 struct dentry *dentry, umode_t mode) 3481 { 3482 int error; 3483 3484 error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0); 3485 if (error) 3486 return error; 3487 inc_nlink(dir); 3488 return 0; 3489 } 3490 3491 static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, 3492 struct dentry *dentry, umode_t mode, bool excl) 3493 { 3494 return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0); 3495 } 3496 3497 /* 3498 * Link a file.. 
3499 */ 3500 static int shmem_link(struct dentry *old_dentry, struct inode *dir, 3501 struct dentry *dentry) 3502 { 3503 struct inode *inode = d_inode(old_dentry); 3504 int ret = 0; 3505 3506 /* 3507 * No ordinary (disk based) filesystem counts links as inodes; 3508 * but each new link needs a new dentry, pinning lowmem, and 3509 * tmpfs dentries cannot be pruned until they are unlinked. 3510 * But if an O_TMPFILE file is linked into the tmpfs, the 3511 * first link must skip that, to get the accounting right. 3512 */ 3513 if (inode->i_nlink) { 3514 ret = shmem_reserve_inode(inode->i_sb, NULL); 3515 if (ret) 3516 goto out; 3517 } 3518 3519 ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry); 3520 if (ret) { 3521 if (inode->i_nlink) 3522 shmem_free_inode(inode->i_sb, 0); 3523 goto out; 3524 } 3525 3526 dir->i_size += BOGO_DIRENT_SIZE; 3527 inode_set_mtime_to_ts(dir, 3528 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode))); 3529 inode_inc_iversion(dir); 3530 inc_nlink(inode); 3531 ihold(inode); /* New dentry reference */ 3532 dget(dentry); /* Extra pinning count for the created dentry */ 3533 d_instantiate(dentry, inode); 3534 out: 3535 return ret; 3536 } 3537 3538 static int shmem_unlink(struct inode *dir, struct dentry *dentry) 3539 { 3540 struct inode *inode = d_inode(dentry); 3541 3542 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 3543 shmem_free_inode(inode->i_sb, 0); 3544 3545 simple_offset_remove(shmem_get_offset_ctx(dir), dentry); 3546 3547 dir->i_size -= BOGO_DIRENT_SIZE; 3548 inode_set_mtime_to_ts(dir, 3549 inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode))); 3550 inode_inc_iversion(dir); 3551 drop_nlink(inode); 3552 dput(dentry); /* Undo the count from "create" - does all the work */ 3553 return 0; 3554 } 3555 3556 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 3557 { 3558 if (!simple_offset_empty(dentry)) 3559 return -ENOTEMPTY; 3560 3561 drop_nlink(d_inode(dentry)); 3562 drop_nlink(dir); 3563 return shmem_unlink(dir, dentry); 3564 } 3565 3566 static int shmem_whiteout(struct mnt_idmap *idmap, 3567 struct inode *old_dir, struct dentry *old_dentry) 3568 { 3569 struct dentry *whiteout; 3570 int error; 3571 3572 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 3573 if (!whiteout) 3574 return -ENOMEM; 3575 3576 error = shmem_mknod(idmap, old_dir, whiteout, 3577 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 3578 dput(whiteout); 3579 if (error) 3580 return error; 3581 3582 /* 3583 * Cheat and hash the whiteout while the old dentry is still in 3584 * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 3585 * 3586 * d_lookup() will consistently find one of them at this point, 3587 * not sure which one, but that isn't even important. 3588 */ 3589 d_rehash(whiteout); 3590 return 0; 3591 } 3592 3593 /* 3594 * The VFS layer already does all the dentry stuff for rename, 3595 * we just have to decrement the usage count for the target if 3596 * it exists so that the VFS layer correctly free's it when it 3597 * gets overwritten. 
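 * RENAME_EXCHANGE and RENAME_WHITEOUT are handled below as well:
 * the latter by creating the whiteout dentry before the rename
 * itself.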
3598 */ 3599 static int shmem_rename2(struct mnt_idmap *idmap, 3600 struct inode *old_dir, struct dentry *old_dentry, 3601 struct inode *new_dir, struct dentry *new_dentry, 3602 unsigned int flags) 3603 { 3604 struct inode *inode = d_inode(old_dentry); 3605 int they_are_dirs = S_ISDIR(inode->i_mode); 3606 int error; 3607 3608 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 3609 return -EINVAL; 3610 3611 if (flags & RENAME_EXCHANGE) 3612 return simple_offset_rename_exchange(old_dir, old_dentry, 3613 new_dir, new_dentry); 3614 3615 if (!simple_offset_empty(new_dentry)) 3616 return -ENOTEMPTY; 3617 3618 if (flags & RENAME_WHITEOUT) { 3619 error = shmem_whiteout(idmap, old_dir, old_dentry); 3620 if (error) 3621 return error; 3622 } 3623 3624 error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry); 3625 if (error) 3626 return error; 3627 3628 if (d_really_is_positive(new_dentry)) { 3629 (void) shmem_unlink(new_dir, new_dentry); 3630 if (they_are_dirs) { 3631 drop_nlink(d_inode(new_dentry)); 3632 drop_nlink(old_dir); 3633 } 3634 } else if (they_are_dirs) { 3635 drop_nlink(old_dir); 3636 inc_nlink(new_dir); 3637 } 3638 3639 old_dir->i_size -= BOGO_DIRENT_SIZE; 3640 new_dir->i_size += BOGO_DIRENT_SIZE; 3641 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); 3642 inode_inc_iversion(old_dir); 3643 inode_inc_iversion(new_dir); 3644 return 0; 3645 } 3646 3647 static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, 3648 struct dentry *dentry, const char *symname) 3649 { 3650 int error; 3651 int len; 3652 struct inode *inode; 3653 struct folio *folio; 3654 3655 len = strlen(symname) + 1; 3656 if (len > PAGE_SIZE) 3657 return -ENAMETOOLONG; 3658 3659 inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, 3660 VM_NORESERVE); 3661 if (IS_ERR(inode)) 3662 return PTR_ERR(inode); 3663 3664 error = security_inode_init_security(inode, dir, &dentry->d_name, 3665 shmem_initxattrs, NULL); 3666 if (error && error != -EOPNOTSUPP) 3667 goto out_iput; 3668 3669 error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); 3670 if (error) 3671 goto out_iput; 3672 3673 inode->i_size = len-1; 3674 if (len <= SHORT_SYMLINK_LEN) { 3675 inode->i_link = kmemdup(symname, len, GFP_KERNEL); 3676 if (!inode->i_link) { 3677 error = -ENOMEM; 3678 goto out_remove_offset; 3679 } 3680 inode->i_op = &shmem_short_symlink_operations; 3681 } else { 3682 inode_nohighmem(inode); 3683 inode->i_mapping->a_ops = &shmem_aops; 3684 error = shmem_get_folio(inode, 0, &folio, SGP_WRITE); 3685 if (error) 3686 goto out_remove_offset; 3687 inode->i_op = &shmem_symlink_inode_operations; 3688 memcpy(folio_address(folio), symname, len); 3689 folio_mark_uptodate(folio); 3690 folio_mark_dirty(folio); 3691 folio_unlock(folio); 3692 folio_put(folio); 3693 } 3694 dir->i_size += BOGO_DIRENT_SIZE; 3695 inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); 3696 inode_inc_iversion(dir); 3697 d_instantiate(dentry, inode); 3698 dget(dentry); 3699 return 0; 3700 3701 out_remove_offset: 3702 simple_offset_remove(shmem_get_offset_ctx(dir), dentry); 3703 out_iput: 3704 iput(inode); 3705 return error; 3706 } 3707 3708 static void shmem_put_link(void *arg) 3709 { 3710 folio_mark_accessed(arg); 3711 folio_put(arg); 3712 } 3713 3714 static const char *shmem_get_link(struct dentry *dentry, struct inode *inode, 3715 struct delayed_call *done) 3716 { 3717 struct folio *folio = NULL; 3718 int error; 3719 3720 if (!dentry) { 3721 folio = filemap_get_folio(inode->i_mapping, 0); 3722 if 
(IS_ERR(folio)) 3723 return ERR_PTR(-ECHILD); 3724 if (PageHWPoison(folio_page(folio, 0)) || 3725 !folio_test_uptodate(folio)) { 3726 folio_put(folio); 3727 return ERR_PTR(-ECHILD); 3728 } 3729 } else { 3730 error = shmem_get_folio(inode, 0, &folio, SGP_READ); 3731 if (error) 3732 return ERR_PTR(error); 3733 if (!folio) 3734 return ERR_PTR(-ECHILD); 3735 if (PageHWPoison(folio_page(folio, 0))) { 3736 folio_unlock(folio); 3737 folio_put(folio); 3738 return ERR_PTR(-ECHILD); 3739 } 3740 folio_unlock(folio); 3741 } 3742 set_delayed_call(done, shmem_put_link, folio); 3743 return folio_address(folio); 3744 } 3745 3746 #ifdef CONFIG_TMPFS_XATTR 3747 3748 static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa) 3749 { 3750 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3751 3752 fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); 3753 3754 return 0; 3755 } 3756 3757 static int shmem_fileattr_set(struct mnt_idmap *idmap, 3758 struct dentry *dentry, struct fileattr *fa) 3759 { 3760 struct inode *inode = d_inode(dentry); 3761 struct shmem_inode_info *info = SHMEM_I(inode); 3762 3763 if (fileattr_has_fsx(fa)) 3764 return -EOPNOTSUPP; 3765 if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) 3766 return -EOPNOTSUPP; 3767 3768 info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | 3769 (fa->flags & SHMEM_FL_USER_MODIFIABLE); 3770 3771 shmem_set_inode_flags(inode, info->fsflags); 3772 inode_set_ctime_current(inode); 3773 inode_inc_iversion(inode); 3774 return 0; 3775 } 3776 3777 /* 3778 * Superblocks without xattr inode operations may get some security.* xattr 3779 * support from the LSM "for free". As soon as we have any other xattrs 3780 * like ACLs, we also need to implement the security.* handlers at 3781 * filesystem level, though. 3782 */ 3783 3784 /* 3785 * Callback for security_inode_init_security() for acquiring xattrs. 
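 * When the filesystem has an inode limit, the space needed by the
 * security xattrs is charged against the superblock's free inode
 * space first; each attribute is then copied in with the
 * "security." prefix prepended to its name.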
3786 */ 3787 static int shmem_initxattrs(struct inode *inode, 3788 const struct xattr *xattr_array, void *fs_info) 3789 { 3790 struct shmem_inode_info *info = SHMEM_I(inode); 3791 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3792 const struct xattr *xattr; 3793 struct simple_xattr *new_xattr; 3794 size_t ispace = 0; 3795 size_t len; 3796 3797 if (sbinfo->max_inodes) { 3798 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 3799 ispace += simple_xattr_space(xattr->name, 3800 xattr->value_len + XATTR_SECURITY_PREFIX_LEN); 3801 } 3802 if (ispace) { 3803 raw_spin_lock(&sbinfo->stat_lock); 3804 if (sbinfo->free_ispace < ispace) 3805 ispace = 0; 3806 else 3807 sbinfo->free_ispace -= ispace; 3808 raw_spin_unlock(&sbinfo->stat_lock); 3809 if (!ispace) 3810 return -ENOSPC; 3811 } 3812 } 3813 3814 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 3815 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 3816 if (!new_xattr) 3817 break; 3818 3819 len = strlen(xattr->name) + 1; 3820 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 3821 GFP_KERNEL_ACCOUNT); 3822 if (!new_xattr->name) { 3823 kvfree(new_xattr); 3824 break; 3825 } 3826 3827 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 3828 XATTR_SECURITY_PREFIX_LEN); 3829 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 3830 xattr->name, len); 3831 3832 simple_xattr_add(&info->xattrs, new_xattr); 3833 } 3834 3835 if (xattr->name != NULL) { 3836 if (ispace) { 3837 raw_spin_lock(&sbinfo->stat_lock); 3838 sbinfo->free_ispace += ispace; 3839 raw_spin_unlock(&sbinfo->stat_lock); 3840 } 3841 simple_xattrs_free(&info->xattrs, NULL); 3842 return -ENOMEM; 3843 } 3844 3845 return 0; 3846 } 3847 3848 static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3849 struct dentry *unused, struct inode *inode, 3850 const char *name, void *buffer, size_t size) 3851 { 3852 struct shmem_inode_info *info = SHMEM_I(inode); 3853 3854 name = xattr_full_name(handler, name); 3855 return simple_xattr_get(&info->xattrs, name, buffer, size); 3856 } 3857 3858 static int shmem_xattr_handler_set(const struct xattr_handler *handler, 3859 struct mnt_idmap *idmap, 3860 struct dentry *unused, struct inode *inode, 3861 const char *name, const void *value, 3862 size_t size, int flags) 3863 { 3864 struct shmem_inode_info *info = SHMEM_I(inode); 3865 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3866 struct simple_xattr *old_xattr; 3867 size_t ispace = 0; 3868 3869 name = xattr_full_name(handler, name); 3870 if (value && sbinfo->max_inodes) { 3871 ispace = simple_xattr_space(name, size); 3872 raw_spin_lock(&sbinfo->stat_lock); 3873 if (sbinfo->free_ispace < ispace) 3874 ispace = 0; 3875 else 3876 sbinfo->free_ispace -= ispace; 3877 raw_spin_unlock(&sbinfo->stat_lock); 3878 if (!ispace) 3879 return -ENOSPC; 3880 } 3881 3882 old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags); 3883 if (!IS_ERR(old_xattr)) { 3884 ispace = 0; 3885 if (old_xattr && sbinfo->max_inodes) 3886 ispace = simple_xattr_space(old_xattr->name, 3887 old_xattr->size); 3888 simple_xattr_free(old_xattr); 3889 old_xattr = NULL; 3890 inode_set_ctime_current(inode); 3891 inode_inc_iversion(inode); 3892 } 3893 if (ispace) { 3894 raw_spin_lock(&sbinfo->stat_lock); 3895 sbinfo->free_ispace += ispace; 3896 raw_spin_unlock(&sbinfo->stat_lock); 3897 } 3898 return PTR_ERR(old_xattr); 3899 } 3900 3901 static const struct xattr_handler shmem_security_xattr_handler = { 3902 .prefix = XATTR_SECURITY_PREFIX, 3903 .get = shmem_xattr_handler_get, 3904 .set 
= shmem_xattr_handler_set, 3905 }; 3906 3907 static const struct xattr_handler shmem_trusted_xattr_handler = { 3908 .prefix = XATTR_TRUSTED_PREFIX, 3909 .get = shmem_xattr_handler_get, 3910 .set = shmem_xattr_handler_set, 3911 }; 3912 3913 static const struct xattr_handler shmem_user_xattr_handler = { 3914 .prefix = XATTR_USER_PREFIX, 3915 .get = shmem_xattr_handler_get, 3916 .set = shmem_xattr_handler_set, 3917 }; 3918 3919 static const struct xattr_handler * const shmem_xattr_handlers[] = { 3920 &shmem_security_xattr_handler, 3921 &shmem_trusted_xattr_handler, 3922 &shmem_user_xattr_handler, 3923 NULL 3924 }; 3925 3926 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3927 { 3928 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3929 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3930 } 3931 #endif /* CONFIG_TMPFS_XATTR */ 3932 3933 static const struct inode_operations shmem_short_symlink_operations = { 3934 .getattr = shmem_getattr, 3935 .setattr = shmem_setattr, 3936 .get_link = simple_get_link, 3937 #ifdef CONFIG_TMPFS_XATTR 3938 .listxattr = shmem_listxattr, 3939 #endif 3940 }; 3941 3942 static const struct inode_operations shmem_symlink_inode_operations = { 3943 .getattr = shmem_getattr, 3944 .setattr = shmem_setattr, 3945 .get_link = shmem_get_link, 3946 #ifdef CONFIG_TMPFS_XATTR 3947 .listxattr = shmem_listxattr, 3948 #endif 3949 }; 3950 3951 static struct dentry *shmem_get_parent(struct dentry *child) 3952 { 3953 return ERR_PTR(-ESTALE); 3954 } 3955 3956 static int shmem_match(struct inode *ino, void *vfh) 3957 { 3958 __u32 *fh = vfh; 3959 __u64 inum = fh[2]; 3960 inum = (inum << 32) | fh[1]; 3961 return ino->i_ino == inum && fh[0] == ino->i_generation; 3962 } 3963 3964 /* Find any alias of inode, but prefer a hashed alias */ 3965 static struct dentry *shmem_find_alias(struct inode *inode) 3966 { 3967 struct dentry *alias = d_find_alias(inode); 3968 3969 return alias ?: d_find_any_alias(inode); 3970 } 3971 3972 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3973 struct fid *fid, int fh_len, int fh_type) 3974 { 3975 struct inode *inode; 3976 struct dentry *dentry = NULL; 3977 u64 inum; 3978 3979 if (fh_len < 3) 3980 return NULL; 3981 3982 inum = fid->raw[2]; 3983 inum = (inum << 32) | fid->raw[1]; 3984 3985 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3986 shmem_match, fid->raw); 3987 if (inode) { 3988 dentry = shmem_find_alias(inode); 3989 iput(inode); 3990 } 3991 3992 return dentry; 3993 } 3994 3995 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3996 struct inode *parent) 3997 { 3998 if (*len < 3) { 3999 *len = 3; 4000 return FILEID_INVALID; 4001 } 4002 4003 if (inode_unhashed(inode)) { 4004 /* Unfortunately insert_inode_hash is not idempotent, 4005 * so as we hash inodes here rather than at creation 4006 * time, we need a lock to ensure we only try 4007 * to do it once 4008 */ 4009 static DEFINE_SPINLOCK(lock); 4010 spin_lock(&lock); 4011 if (inode_unhashed(inode)) 4012 __insert_inode_hash(inode, 4013 inode->i_ino + inode->i_generation); 4014 spin_unlock(&lock); 4015 } 4016 4017 fh[0] = inode->i_generation; 4018 fh[1] = inode->i_ino; 4019 fh[2] = ((__u64)inode->i_ino) >> 32; 4020 4021 *len = 3; 4022 return 1; 4023 } 4024 4025 static const struct export_operations shmem_export_ops = { 4026 .get_parent = shmem_get_parent, 4027 .encode_fh = shmem_encode_fh, 4028 .fh_to_dentry = shmem_fh_to_dentry, 4029 }; 4030 4031 enum shmem_param { 4032 Opt_gid, 4033 Opt_huge, 
4034 Opt_mode, 4035 Opt_mpol, 4036 Opt_nr_blocks, 4037 Opt_nr_inodes, 4038 Opt_size, 4039 Opt_uid, 4040 Opt_inode32, 4041 Opt_inode64, 4042 Opt_noswap, 4043 Opt_quota, 4044 Opt_usrquota, 4045 Opt_grpquota, 4046 Opt_usrquota_block_hardlimit, 4047 Opt_usrquota_inode_hardlimit, 4048 Opt_grpquota_block_hardlimit, 4049 Opt_grpquota_inode_hardlimit, 4050 }; 4051 4052 static const struct constant_table shmem_param_enums_huge[] = { 4053 {"never", SHMEM_HUGE_NEVER }, 4054 {"always", SHMEM_HUGE_ALWAYS }, 4055 {"within_size", SHMEM_HUGE_WITHIN_SIZE }, 4056 {"advise", SHMEM_HUGE_ADVISE }, 4057 {} 4058 }; 4059 4060 const struct fs_parameter_spec shmem_fs_parameters[] = { 4061 fsparam_u32 ("gid", Opt_gid), 4062 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), 4063 fsparam_u32oct("mode", Opt_mode), 4064 fsparam_string("mpol", Opt_mpol), 4065 fsparam_string("nr_blocks", Opt_nr_blocks), 4066 fsparam_string("nr_inodes", Opt_nr_inodes), 4067 fsparam_string("size", Opt_size), 4068 fsparam_u32 ("uid", Opt_uid), 4069 fsparam_flag ("inode32", Opt_inode32), 4070 fsparam_flag ("inode64", Opt_inode64), 4071 fsparam_flag ("noswap", Opt_noswap), 4072 #ifdef CONFIG_TMPFS_QUOTA 4073 fsparam_flag ("quota", Opt_quota), 4074 fsparam_flag ("usrquota", Opt_usrquota), 4075 fsparam_flag ("grpquota", Opt_grpquota), 4076 fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit), 4077 fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit), 4078 fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit), 4079 fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit), 4080 #endif 4081 {} 4082 }; 4083 4084 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 4085 { 4086 struct shmem_options *ctx = fc->fs_private; 4087 struct fs_parse_result result; 4088 unsigned long long size; 4089 char *rest; 4090 int opt; 4091 kuid_t kuid; 4092 kgid_t kgid; 4093 4094 opt = fs_parse(fc, shmem_fs_parameters, param, &result); 4095 if (opt < 0) 4096 return opt; 4097 4098 switch (opt) { 4099 case Opt_size: 4100 size = memparse(param->string, &rest); 4101 if (*rest == '%') { 4102 size <<= PAGE_SHIFT; 4103 size *= totalram_pages(); 4104 do_div(size, 100); 4105 rest++; 4106 } 4107 if (*rest) 4108 goto bad_value; 4109 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 4110 ctx->seen |= SHMEM_SEEN_BLOCKS; 4111 break; 4112 case Opt_nr_blocks: 4113 ctx->blocks = memparse(param->string, &rest); 4114 if (*rest || ctx->blocks > LONG_MAX) 4115 goto bad_value; 4116 ctx->seen |= SHMEM_SEEN_BLOCKS; 4117 break; 4118 case Opt_nr_inodes: 4119 ctx->inodes = memparse(param->string, &rest); 4120 if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE) 4121 goto bad_value; 4122 ctx->seen |= SHMEM_SEEN_INODES; 4123 break; 4124 case Opt_mode: 4125 ctx->mode = result.uint_32 & 07777; 4126 break; 4127 case Opt_uid: 4128 kuid = make_kuid(current_user_ns(), result.uint_32); 4129 if (!uid_valid(kuid)) 4130 goto bad_value; 4131 4132 /* 4133 * The requested uid must be representable in the 4134 * filesystem's idmapping. 4135 */ 4136 if (!kuid_has_mapping(fc->user_ns, kuid)) 4137 goto bad_value; 4138 4139 ctx->uid = kuid; 4140 break; 4141 case Opt_gid: 4142 kgid = make_kgid(current_user_ns(), result.uint_32); 4143 if (!gid_valid(kgid)) 4144 goto bad_value; 4145 4146 /* 4147 * The requested gid must be representable in the 4148 * filesystem's idmapping. 
4149 */ 4150 if (!kgid_has_mapping(fc->user_ns, kgid)) 4151 goto bad_value; 4152 4153 ctx->gid = kgid; 4154 break; 4155 case Opt_huge: 4156 ctx->huge = result.uint_32; 4157 if (ctx->huge != SHMEM_HUGE_NEVER && 4158 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 4159 has_transparent_hugepage())) 4160 goto unsupported_parameter; 4161 ctx->seen |= SHMEM_SEEN_HUGE; 4162 break; 4163 case Opt_mpol: 4164 if (IS_ENABLED(CONFIG_NUMA)) { 4165 mpol_put(ctx->mpol); 4166 ctx->mpol = NULL; 4167 if (mpol_parse_str(param->string, &ctx->mpol)) 4168 goto bad_value; 4169 break; 4170 } 4171 goto unsupported_parameter; 4172 case Opt_inode32: 4173 ctx->full_inums = false; 4174 ctx->seen |= SHMEM_SEEN_INUMS; 4175 break; 4176 case Opt_inode64: 4177 if (sizeof(ino_t) < 8) { 4178 return invalfc(fc, 4179 "Cannot use inode64 with <64bit inums in kernel\n"); 4180 } 4181 ctx->full_inums = true; 4182 ctx->seen |= SHMEM_SEEN_INUMS; 4183 break; 4184 case Opt_noswap: 4185 if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) { 4186 return invalfc(fc, 4187 "Turning off swap in unprivileged tmpfs mounts unsupported"); 4188 } 4189 ctx->noswap = true; 4190 ctx->seen |= SHMEM_SEEN_NOSWAP; 4191 break; 4192 case Opt_quota: 4193 if (fc->user_ns != &init_user_ns) 4194 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); 4195 ctx->seen |= SHMEM_SEEN_QUOTA; 4196 ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP); 4197 break; 4198 case Opt_usrquota: 4199 if (fc->user_ns != &init_user_ns) 4200 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); 4201 ctx->seen |= SHMEM_SEEN_QUOTA; 4202 ctx->quota_types |= QTYPE_MASK_USR; 4203 break; 4204 case Opt_grpquota: 4205 if (fc->user_ns != &init_user_ns) 4206 return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); 4207 ctx->seen |= SHMEM_SEEN_QUOTA; 4208 ctx->quota_types |= QTYPE_MASK_GRP; 4209 break; 4210 case Opt_usrquota_block_hardlimit: 4211 size = memparse(param->string, &rest); 4212 if (*rest || !size) 4213 goto bad_value; 4214 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) 4215 return invalfc(fc, 4216 "User quota block hardlimit too large."); 4217 ctx->qlimits.usrquota_bhardlimit = size; 4218 break; 4219 case Opt_grpquota_block_hardlimit: 4220 size = memparse(param->string, &rest); 4221 if (*rest || !size) 4222 goto bad_value; 4223 if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) 4224 return invalfc(fc, 4225 "Group quota block hardlimit too large."); 4226 ctx->qlimits.grpquota_bhardlimit = size; 4227 break; 4228 case Opt_usrquota_inode_hardlimit: 4229 size = memparse(param->string, &rest); 4230 if (*rest || !size) 4231 goto bad_value; 4232 if (size > SHMEM_QUOTA_MAX_INO_LIMIT) 4233 return invalfc(fc, 4234 "User quota inode hardlimit too large."); 4235 ctx->qlimits.usrquota_ihardlimit = size; 4236 break; 4237 case Opt_grpquota_inode_hardlimit: 4238 size = memparse(param->string, &rest); 4239 if (*rest || !size) 4240 goto bad_value; 4241 if (size > SHMEM_QUOTA_MAX_INO_LIMIT) 4242 return invalfc(fc, 4243 "Group quota inode hardlimit too large."); 4244 ctx->qlimits.grpquota_ihardlimit = size; 4245 break; 4246 } 4247 return 0; 4248 4249 unsupported_parameter: 4250 return invalfc(fc, "Unsupported parameter '%s'", param->key); 4251 bad_value: 4252 return invalfc(fc, "Bad value for '%s'", param->key); 4253 } 4254 4255 static int shmem_parse_options(struct fs_context *fc, void *data) 4256 { 4257 char *options = data; 4258 4259 if (options) { 4260 int err = security_sb_eat_lsm_opts(options, &fc->security); 4261 if (err) 4262 return err; 4263 } 
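	/*
	 * Example (added for illustration): data = "mpol=bind:0-2,5,size=1g"
	 * must be split into "mpol=bind:0-2,5" and "size=1g".  The loop below
	 * only treats a comma as an option separator when the next character
	 * is not a digit, so mpol's comma-separated nodelist stays intact.
	 */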
4264 4265 while (options != NULL) { 4266 char *this_char = options; 4267 for (;;) { 4268 /* 4269 * NUL-terminate this option: unfortunately, 4270 * mount options form a comma-separated list, 4271 * but mpol's nodelist may also contain commas. 4272 */ 4273 options = strchr(options, ','); 4274 if (options == NULL) 4275 break; 4276 options++; 4277 if (!isdigit(*options)) { 4278 options[-1] = '\0'; 4279 break; 4280 } 4281 } 4282 if (*this_char) { 4283 char *value = strchr(this_char, '='); 4284 size_t len = 0; 4285 int err; 4286 4287 if (value) { 4288 *value++ = '\0'; 4289 len = strlen(value); 4290 } 4291 err = vfs_parse_fs_string(fc, this_char, value, len); 4292 if (err < 0) 4293 return err; 4294 } 4295 } 4296 return 0; 4297 } 4298 4299 /* 4300 * Reconfigure a shmem filesystem. 4301 */ 4302 static int shmem_reconfigure(struct fs_context *fc) 4303 { 4304 struct shmem_options *ctx = fc->fs_private; 4305 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 4306 unsigned long used_isp; 4307 struct mempolicy *mpol = NULL; 4308 const char *err; 4309 4310 raw_spin_lock(&sbinfo->stat_lock); 4311 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace; 4312 4313 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 4314 if (!sbinfo->max_blocks) { 4315 err = "Cannot retroactively limit size"; 4316 goto out; 4317 } 4318 if (percpu_counter_compare(&sbinfo->used_blocks, 4319 ctx->blocks) > 0) { 4320 err = "Too small a size for current use"; 4321 goto out; 4322 } 4323 } 4324 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 4325 if (!sbinfo->max_inodes) { 4326 err = "Cannot retroactively limit inodes"; 4327 goto out; 4328 } 4329 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) { 4330 err = "Too few inodes for current use"; 4331 goto out; 4332 } 4333 } 4334 4335 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && 4336 sbinfo->next_ino > UINT_MAX) { 4337 err = "Current inum too high to switch to 32-bit inums"; 4338 goto out; 4339 } 4340 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { 4341 err = "Cannot disable swap on remount"; 4342 goto out; 4343 } 4344 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { 4345 err = "Cannot enable swap on remount if it was disabled on first mount"; 4346 goto out; 4347 } 4348 4349 if (ctx->seen & SHMEM_SEEN_QUOTA && 4350 !sb_any_quota_loaded(fc->root->d_sb)) { 4351 err = "Cannot enable quota on remount"; 4352 goto out; 4353 } 4354 4355 #ifdef CONFIG_TMPFS_QUOTA 4356 #define CHANGED_LIMIT(name) \ 4357 (ctx->qlimits.name## hardlimit && \ 4358 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit)) 4359 4360 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) || 4361 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) { 4362 err = "Cannot change global quota limit on remount"; 4363 goto out; 4364 } 4365 #endif /* CONFIG_TMPFS_QUOTA */ 4366 4367 if (ctx->seen & SHMEM_SEEN_HUGE) 4368 sbinfo->huge = ctx->huge; 4369 if (ctx->seen & SHMEM_SEEN_INUMS) 4370 sbinfo->full_inums = ctx->full_inums; 4371 if (ctx->seen & SHMEM_SEEN_BLOCKS) 4372 sbinfo->max_blocks = ctx->blocks; 4373 if (ctx->seen & SHMEM_SEEN_INODES) { 4374 sbinfo->max_inodes = ctx->inodes; 4375 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp; 4376 } 4377 4378 /* 4379 * Preserve previous mempolicy unless mpol remount option was specified. 
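 * The reference on any replaced policy is dropped with mpol_put() only
 * after stat_lock has been released below.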
4380 */ 4381 if (ctx->mpol) { 4382 mpol = sbinfo->mpol; 4383 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 4384 ctx->mpol = NULL; 4385 } 4386 4387 if (ctx->noswap) 4388 sbinfo->noswap = true; 4389 4390 raw_spin_unlock(&sbinfo->stat_lock); 4391 mpol_put(mpol); 4392 return 0; 4393 out: 4394 raw_spin_unlock(&sbinfo->stat_lock); 4395 return invalfc(fc, "%s", err); 4396 } 4397 4398 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 4399 { 4400 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 4401 struct mempolicy *mpol; 4402 4403 if (sbinfo->max_blocks != shmem_default_max_blocks()) 4404 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks)); 4405 if (sbinfo->max_inodes != shmem_default_max_inodes()) 4406 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 4407 if (sbinfo->mode != (0777 | S_ISVTX)) 4408 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 4409 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 4410 seq_printf(seq, ",uid=%u", 4411 from_kuid_munged(&init_user_ns, sbinfo->uid)); 4412 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 4413 seq_printf(seq, ",gid=%u", 4414 from_kgid_munged(&init_user_ns, sbinfo->gid)); 4415 4416 /* 4417 * Showing inode{64,32} might be useful even if it's the system default, 4418 * since then people don't have to resort to checking both here and 4419 * /proc/config.gz to confirm 64-bit inums were successfully applied 4420 * (which may not even exist if IKCONFIG_PROC isn't enabled). 4421 * 4422 * We hide it when inode64 isn't the default and we are using 32-bit 4423 * inodes, since that probably just means the feature isn't even under 4424 * consideration. 4425 * 4426 * As such: 4427 * 4428 * +-----------------+-----------------+ 4429 * | TMPFS_INODE64=y | TMPFS_INODE64=n | 4430 * +------------------+-----------------+-----------------+ 4431 * | full_inums=true | show | show | 4432 * | full_inums=false | show | hide | 4433 * +------------------+-----------------+-----------------+ 4434 * 4435 */ 4436 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) 4437 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 
64 : 32)); 4438 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4439 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 4440 if (sbinfo->huge) 4441 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 4442 #endif 4443 mpol = shmem_get_sbmpol(sbinfo); 4444 shmem_show_mpol(seq, mpol); 4445 mpol_put(mpol); 4446 if (sbinfo->noswap) 4447 seq_printf(seq, ",noswap"); 4448 #ifdef CONFIG_TMPFS_QUOTA 4449 if (sb_has_quota_active(root->d_sb, USRQUOTA)) 4450 seq_printf(seq, ",usrquota"); 4451 if (sb_has_quota_active(root->d_sb, GRPQUOTA)) 4452 seq_printf(seq, ",grpquota"); 4453 if (sbinfo->qlimits.usrquota_bhardlimit) 4454 seq_printf(seq, ",usrquota_block_hardlimit=%lld", 4455 sbinfo->qlimits.usrquota_bhardlimit); 4456 if (sbinfo->qlimits.grpquota_bhardlimit) 4457 seq_printf(seq, ",grpquota_block_hardlimit=%lld", 4458 sbinfo->qlimits.grpquota_bhardlimit); 4459 if (sbinfo->qlimits.usrquota_ihardlimit) 4460 seq_printf(seq, ",usrquota_inode_hardlimit=%lld", 4461 sbinfo->qlimits.usrquota_ihardlimit); 4462 if (sbinfo->qlimits.grpquota_ihardlimit) 4463 seq_printf(seq, ",grpquota_inode_hardlimit=%lld", 4464 sbinfo->qlimits.grpquota_ihardlimit); 4465 #endif 4466 return 0; 4467 } 4468 4469 #endif /* CONFIG_TMPFS */ 4470 4471 static void shmem_put_super(struct super_block *sb) 4472 { 4473 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 4474 4475 #ifdef CONFIG_TMPFS_QUOTA 4476 shmem_disable_quotas(sb); 4477 #endif 4478 free_percpu(sbinfo->ino_batch); 4479 percpu_counter_destroy(&sbinfo->used_blocks); 4480 mpol_put(sbinfo->mpol); 4481 kfree(sbinfo); 4482 sb->s_fs_info = NULL; 4483 } 4484 4485 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) 4486 { 4487 struct shmem_options *ctx = fc->fs_private; 4488 struct inode *inode; 4489 struct shmem_sb_info *sbinfo; 4490 int error = -ENOMEM; 4491 4492 /* Round up to L1_CACHE_BYTES to resist false sharing */ 4493 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 4494 L1_CACHE_BYTES), GFP_KERNEL); 4495 if (!sbinfo) 4496 return error; 4497 4498 sb->s_fs_info = sbinfo; 4499 4500 #ifdef CONFIG_TMPFS 4501 /* 4502 * Per default we only allow half of the physical ram per 4503 * tmpfs instance, limiting inodes to one per page of lowmem; 4504 * but the internal instance is left unlimited. 
4505 */ 4506 if (!(sb->s_flags & SB_KERNMOUNT)) { 4507 if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 4508 ctx->blocks = shmem_default_max_blocks(); 4509 if (!(ctx->seen & SHMEM_SEEN_INODES)) 4510 ctx->inodes = shmem_default_max_inodes(); 4511 if (!(ctx->seen & SHMEM_SEEN_INUMS)) 4512 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); 4513 sbinfo->noswap = ctx->noswap; 4514 } else { 4515 sb->s_flags |= SB_NOUSER; 4516 } 4517 sb->s_export_op = &shmem_export_ops; 4518 sb->s_flags |= SB_NOSEC | SB_I_VERSION; 4519 #else 4520 sb->s_flags |= SB_NOUSER; 4521 #endif 4522 sbinfo->max_blocks = ctx->blocks; 4523 sbinfo->max_inodes = ctx->inodes; 4524 sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE; 4525 if (sb->s_flags & SB_KERNMOUNT) { 4526 sbinfo->ino_batch = alloc_percpu(ino_t); 4527 if (!sbinfo->ino_batch) 4528 goto failed; 4529 } 4530 sbinfo->uid = ctx->uid; 4531 sbinfo->gid = ctx->gid; 4532 sbinfo->full_inums = ctx->full_inums; 4533 sbinfo->mode = ctx->mode; 4534 sbinfo->huge = ctx->huge; 4535 sbinfo->mpol = ctx->mpol; 4536 ctx->mpol = NULL; 4537 4538 raw_spin_lock_init(&sbinfo->stat_lock); 4539 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 4540 goto failed; 4541 spin_lock_init(&sbinfo->shrinklist_lock); 4542 INIT_LIST_HEAD(&sbinfo->shrinklist); 4543 4544 sb->s_maxbytes = MAX_LFS_FILESIZE; 4545 sb->s_blocksize = PAGE_SIZE; 4546 sb->s_blocksize_bits = PAGE_SHIFT; 4547 sb->s_magic = TMPFS_MAGIC; 4548 sb->s_op = &shmem_ops; 4549 sb->s_time_gran = 1; 4550 #ifdef CONFIG_TMPFS_XATTR 4551 sb->s_xattr = shmem_xattr_handlers; 4552 #endif 4553 #ifdef CONFIG_TMPFS_POSIX_ACL 4554 sb->s_flags |= SB_POSIXACL; 4555 #endif 4556 uuid_t uuid; 4557 uuid_gen(&uuid); 4558 super_set_uuid(sb, uuid.b, sizeof(uuid)); 4559 4560 #ifdef CONFIG_TMPFS_QUOTA 4561 if (ctx->seen & SHMEM_SEEN_QUOTA) { 4562 sb->dq_op = &shmem_quota_operations; 4563 sb->s_qcop = &dquot_quotactl_sysfile_ops; 4564 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; 4565 4566 /* Copy the default limits from ctx into sbinfo */ 4567 memcpy(&sbinfo->qlimits, &ctx->qlimits, 4568 sizeof(struct shmem_quota_limits)); 4569 4570 if (shmem_enable_quotas(sb, ctx->quota_types)) 4571 goto failed; 4572 } 4573 #endif /* CONFIG_TMPFS_QUOTA */ 4574 4575 inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, 4576 S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 4577 if (IS_ERR(inode)) { 4578 error = PTR_ERR(inode); 4579 goto failed; 4580 } 4581 inode->i_uid = sbinfo->uid; 4582 inode->i_gid = sbinfo->gid; 4583 sb->s_root = d_make_root(inode); 4584 if (!sb->s_root) 4585 goto failed; 4586 return 0; 4587 4588 failed: 4589 shmem_put_super(sb); 4590 return error; 4591 } 4592 4593 static int shmem_get_tree(struct fs_context *fc) 4594 { 4595 return get_tree_nodev(fc, shmem_fill_super); 4596 } 4597 4598 static void shmem_free_fc(struct fs_context *fc) 4599 { 4600 struct shmem_options *ctx = fc->fs_private; 4601 4602 if (ctx) { 4603 mpol_put(ctx->mpol); 4604 kfree(ctx); 4605 } 4606 } 4607 4608 static const struct fs_context_operations shmem_fs_context_ops = { 4609 .free = shmem_free_fc, 4610 .get_tree = shmem_get_tree, 4611 #ifdef CONFIG_TMPFS 4612 .parse_monolithic = shmem_parse_options, 4613 .parse_param = shmem_parse_one, 4614 .reconfigure = shmem_reconfigure, 4615 #endif 4616 }; 4617 4618 static struct kmem_cache *shmem_inode_cachep __ro_after_init; 4619 4620 static struct inode *shmem_alloc_inode(struct super_block *sb) 4621 { 4622 struct shmem_inode_info *info; 4623 info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL); 4624 if (!info) 4625 return NULL; 4626 
return &info->vfs_inode; 4627 } 4628 4629 static void shmem_free_in_core_inode(struct inode *inode) 4630 { 4631 if (S_ISLNK(inode->i_mode)) 4632 kfree(inode->i_link); 4633 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 4634 } 4635 4636 static void shmem_destroy_inode(struct inode *inode) 4637 { 4638 if (S_ISREG(inode->i_mode)) 4639 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 4640 if (S_ISDIR(inode->i_mode)) 4641 simple_offset_destroy(shmem_get_offset_ctx(inode)); 4642 } 4643 4644 static void shmem_init_inode(void *foo) 4645 { 4646 struct shmem_inode_info *info = foo; 4647 inode_init_once(&info->vfs_inode); 4648 } 4649 4650 static void __init shmem_init_inodecache(void) 4651 { 4652 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 4653 sizeof(struct shmem_inode_info), 4654 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 4655 } 4656 4657 static void __init shmem_destroy_inodecache(void) 4658 { 4659 kmem_cache_destroy(shmem_inode_cachep); 4660 } 4661 4662 /* Keep the page in page cache instead of truncating it */ 4663 static int shmem_error_remove_folio(struct address_space *mapping, 4664 struct folio *folio) 4665 { 4666 return 0; 4667 } 4668 4669 static const struct address_space_operations shmem_aops = { 4670 .writepage = shmem_writepage, 4671 .dirty_folio = noop_dirty_folio, 4672 #ifdef CONFIG_TMPFS 4673 .write_begin = shmem_write_begin, 4674 .write_end = shmem_write_end, 4675 #endif 4676 #ifdef CONFIG_MIGRATION 4677 .migrate_folio = migrate_folio, 4678 #endif 4679 .error_remove_folio = shmem_error_remove_folio, 4680 }; 4681 4682 static const struct file_operations shmem_file_operations = { 4683 .mmap = shmem_mmap, 4684 .open = shmem_file_open, 4685 .get_unmapped_area = shmem_get_unmapped_area, 4686 #ifdef CONFIG_TMPFS 4687 .llseek = shmem_file_llseek, 4688 .read_iter = shmem_file_read_iter, 4689 .write_iter = shmem_file_write_iter, 4690 .fsync = noop_fsync, 4691 .splice_read = shmem_file_splice_read, 4692 .splice_write = iter_file_splice_write, 4693 .fallocate = shmem_fallocate, 4694 #endif 4695 }; 4696 4697 static const struct inode_operations shmem_inode_operations = { 4698 .getattr = shmem_getattr, 4699 .setattr = shmem_setattr, 4700 #ifdef CONFIG_TMPFS_XATTR 4701 .listxattr = shmem_listxattr, 4702 .set_acl = simple_set_acl, 4703 .fileattr_get = shmem_fileattr_get, 4704 .fileattr_set = shmem_fileattr_set, 4705 #endif 4706 }; 4707 4708 static const struct inode_operations shmem_dir_inode_operations = { 4709 #ifdef CONFIG_TMPFS 4710 .getattr = shmem_getattr, 4711 .create = shmem_create, 4712 .lookup = simple_lookup, 4713 .link = shmem_link, 4714 .unlink = shmem_unlink, 4715 .symlink = shmem_symlink, 4716 .mkdir = shmem_mkdir, 4717 .rmdir = shmem_rmdir, 4718 .mknod = shmem_mknod, 4719 .rename = shmem_rename2, 4720 .tmpfile = shmem_tmpfile, 4721 .get_offset_ctx = shmem_get_offset_ctx, 4722 #endif 4723 #ifdef CONFIG_TMPFS_XATTR 4724 .listxattr = shmem_listxattr, 4725 .fileattr_get = shmem_fileattr_get, 4726 .fileattr_set = shmem_fileattr_set, 4727 #endif 4728 #ifdef CONFIG_TMPFS_POSIX_ACL 4729 .setattr = shmem_setattr, 4730 .set_acl = simple_set_acl, 4731 #endif 4732 }; 4733 4734 static const struct inode_operations shmem_special_inode_operations = { 4735 .getattr = shmem_getattr, 4736 #ifdef CONFIG_TMPFS_XATTR 4737 .listxattr = shmem_listxattr, 4738 #endif 4739 #ifdef CONFIG_TMPFS_POSIX_ACL 4740 .setattr = shmem_setattr, 4741 .set_acl = simple_set_acl, 4742 #endif 4743 }; 4744 4745 static const struct super_operations shmem_ops = { 4746 .alloc_inode = 
shmem_alloc_inode, 4747 .free_inode = shmem_free_in_core_inode, 4748 .destroy_inode = shmem_destroy_inode, 4749 #ifdef CONFIG_TMPFS 4750 .statfs = shmem_statfs, 4751 .show_options = shmem_show_options, 4752 #endif 4753 #ifdef CONFIG_TMPFS_QUOTA 4754 .get_dquots = shmem_get_dquots, 4755 #endif 4756 .evict_inode = shmem_evict_inode, 4757 .drop_inode = generic_delete_inode, 4758 .put_super = shmem_put_super, 4759 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4760 .nr_cached_objects = shmem_unused_huge_count, 4761 .free_cached_objects = shmem_unused_huge_scan, 4762 #endif 4763 }; 4764 4765 static const struct vm_operations_struct shmem_vm_ops = { 4766 .fault = shmem_fault, 4767 .map_pages = filemap_map_pages, 4768 #ifdef CONFIG_NUMA 4769 .set_policy = shmem_set_policy, 4770 .get_policy = shmem_get_policy, 4771 #endif 4772 }; 4773 4774 static const struct vm_operations_struct shmem_anon_vm_ops = { 4775 .fault = shmem_fault, 4776 .map_pages = filemap_map_pages, 4777 #ifdef CONFIG_NUMA 4778 .set_policy = shmem_set_policy, 4779 .get_policy = shmem_get_policy, 4780 #endif 4781 }; 4782 4783 int shmem_init_fs_context(struct fs_context *fc) 4784 { 4785 struct shmem_options *ctx; 4786 4787 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); 4788 if (!ctx) 4789 return -ENOMEM; 4790 4791 ctx->mode = 0777 | S_ISVTX; 4792 ctx->uid = current_fsuid(); 4793 ctx->gid = current_fsgid(); 4794 4795 fc->fs_private = ctx; 4796 fc->ops = &shmem_fs_context_ops; 4797 return 0; 4798 } 4799 4800 static struct file_system_type shmem_fs_type = { 4801 .owner = THIS_MODULE, 4802 .name = "tmpfs", 4803 .init_fs_context = shmem_init_fs_context, 4804 #ifdef CONFIG_TMPFS 4805 .parameters = shmem_fs_parameters, 4806 #endif 4807 .kill_sb = kill_litter_super, 4808 .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP, 4809 }; 4810 4811 void __init shmem_init(void) 4812 { 4813 int error; 4814 4815 shmem_init_inodecache(); 4816 4817 #ifdef CONFIG_TMPFS_QUOTA 4818 error = register_quota_format(&shmem_quota_format); 4819 if (error < 0) { 4820 pr_err("Could not register quota format\n"); 4821 goto out3; 4822 } 4823 #endif 4824 4825 error = register_filesystem(&shmem_fs_type); 4826 if (error) { 4827 pr_err("Could not register tmpfs\n"); 4828 goto out2; 4829 } 4830 4831 shm_mnt = kern_mount(&shmem_fs_type); 4832 if (IS_ERR(shm_mnt)) { 4833 error = PTR_ERR(shm_mnt); 4834 pr_err("Could not kern_mount tmpfs\n"); 4835 goto out1; 4836 } 4837 4838 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4839 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 4840 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 4841 else 4842 shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ 4843 4844 /* 4845 * Default to setting PMD-sized THP to inherit the global setting and 4846 * disable all other multi-size THPs. 
4847 */ 4848 huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER); 4849 #endif 4850 return; 4851 4852 out1: 4853 unregister_filesystem(&shmem_fs_type); 4854 out2: 4855 #ifdef CONFIG_TMPFS_QUOTA 4856 unregister_quota_format(&shmem_quota_format); 4857 out3: 4858 #endif 4859 shmem_destroy_inodecache(); 4860 shm_mnt = ERR_PTR(error); 4861 } 4862 4863 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) 4864 static ssize_t shmem_enabled_show(struct kobject *kobj, 4865 struct kobj_attribute *attr, char *buf) 4866 { 4867 static const int values[] = { 4868 SHMEM_HUGE_ALWAYS, 4869 SHMEM_HUGE_WITHIN_SIZE, 4870 SHMEM_HUGE_ADVISE, 4871 SHMEM_HUGE_NEVER, 4872 SHMEM_HUGE_DENY, 4873 SHMEM_HUGE_FORCE, 4874 }; 4875 int len = 0; 4876 int i; 4877 4878 for (i = 0; i < ARRAY_SIZE(values); i++) { 4879 len += sysfs_emit_at(buf, len, 4880 shmem_huge == values[i] ? "%s[%s]" : "%s%s", 4881 i ? " " : "", shmem_format_huge(values[i])); 4882 } 4883 len += sysfs_emit_at(buf, len, "\n"); 4884 4885 return len; 4886 } 4887 4888 static ssize_t shmem_enabled_store(struct kobject *kobj, 4889 struct kobj_attribute *attr, const char *buf, size_t count) 4890 { 4891 char tmp[16]; 4892 int huge; 4893 4894 if (count + 1 > sizeof(tmp)) 4895 return -EINVAL; 4896 memcpy(tmp, buf, count); 4897 tmp[count] = '\0'; 4898 if (count && tmp[count - 1] == '\n') 4899 tmp[count - 1] = '\0'; 4900 4901 huge = shmem_parse_huge(tmp); 4902 if (huge == -EINVAL) 4903 return -EINVAL; 4904 if (!has_transparent_hugepage() && 4905 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 4906 return -EINVAL; 4907 4908 /* Do not override huge allocation policy with non-PMD sized mTHP */ 4909 if (huge == SHMEM_HUGE_FORCE && 4910 huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER)) 4911 return -EINVAL; 4912 4913 shmem_huge = huge; 4914 if (shmem_huge > SHMEM_HUGE_DENY) 4915 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 4916 return count; 4917 } 4918 4919 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); 4920 static DEFINE_SPINLOCK(huge_shmem_orders_lock); 4921 4922 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj, 4923 struct kobj_attribute *attr, char *buf) 4924 { 4925 int order = to_thpsize(kobj)->order; 4926 const char *output; 4927 4928 if (test_bit(order, &huge_shmem_orders_always)) 4929 output = "[always] inherit within_size advise never"; 4930 else if (test_bit(order, &huge_shmem_orders_inherit)) 4931 output = "always [inherit] within_size advise never"; 4932 else if (test_bit(order, &huge_shmem_orders_within_size)) 4933 output = "always inherit [within_size] advise never"; 4934 else if (test_bit(order, &huge_shmem_orders_madvise)) 4935 output = "always inherit within_size [advise] never"; 4936 else 4937 output = "always inherit within_size advise [never]"; 4938 4939 return sysfs_emit(buf, "%s\n", output); 4940 } 4941 4942 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj, 4943 struct kobj_attribute *attr, 4944 const char *buf, size_t count) 4945 { 4946 int order = to_thpsize(kobj)->order; 4947 ssize_t ret = count; 4948 4949 if (sysfs_streq(buf, "always")) { 4950 spin_lock(&huge_shmem_orders_lock); 4951 clear_bit(order, &huge_shmem_orders_inherit); 4952 clear_bit(order, &huge_shmem_orders_madvise); 4953 clear_bit(order, &huge_shmem_orders_within_size); 4954 set_bit(order, &huge_shmem_orders_always); 4955 spin_unlock(&huge_shmem_orders_lock); 4956 } else if (sysfs_streq(buf, "inherit")) { 4957 /* Do not override huge allocation policy with non-PMD sized mTHP */ 4958 if (shmem_huge == SHMEM_HUGE_FORCE && 
4959 order != HPAGE_PMD_ORDER) 4960 return -EINVAL; 4961 4962 spin_lock(&huge_shmem_orders_lock); 4963 clear_bit(order, &huge_shmem_orders_always); 4964 clear_bit(order, &huge_shmem_orders_madvise); 4965 clear_bit(order, &huge_shmem_orders_within_size); 4966 set_bit(order, &huge_shmem_orders_inherit); 4967 spin_unlock(&huge_shmem_orders_lock); 4968 } else if (sysfs_streq(buf, "within_size")) { 4969 spin_lock(&huge_shmem_orders_lock); 4970 clear_bit(order, &huge_shmem_orders_always); 4971 clear_bit(order, &huge_shmem_orders_inherit); 4972 clear_bit(order, &huge_shmem_orders_madvise); 4973 set_bit(order, &huge_shmem_orders_within_size); 4974 spin_unlock(&huge_shmem_orders_lock); 4975 } else if (sysfs_streq(buf, "madvise")) { 4976 spin_lock(&huge_shmem_orders_lock); 4977 clear_bit(order, &huge_shmem_orders_always); 4978 clear_bit(order, &huge_shmem_orders_inherit); 4979 clear_bit(order, &huge_shmem_orders_within_size); 4980 set_bit(order, &huge_shmem_orders_madvise); 4981 spin_unlock(&huge_shmem_orders_lock); 4982 } else if (sysfs_streq(buf, "never")) { 4983 spin_lock(&huge_shmem_orders_lock); 4984 clear_bit(order, &huge_shmem_orders_always); 4985 clear_bit(order, &huge_shmem_orders_inherit); 4986 clear_bit(order, &huge_shmem_orders_within_size); 4987 clear_bit(order, &huge_shmem_orders_madvise); 4988 spin_unlock(&huge_shmem_orders_lock); 4989 } else { 4990 ret = -EINVAL; 4991 } 4992 4993 return ret; 4994 } 4995 4996 struct kobj_attribute thpsize_shmem_enabled_attr = 4997 __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store); 4998 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ 4999 5000 #else /* !CONFIG_SHMEM */ 5001 5002 /* 5003 * tiny-shmem: simple shmemfs and tmpfs using ramfs code 5004 * 5005 * This is intended for small system where the benefits of the full 5006 * shmem code (swap-backed and resource-limited) are outweighed by 5007 * their complexity. On systems without swap this code should be 5008 * effectively equivalent, but much lighter weight. 
5009 */ 5010 5011 static struct file_system_type shmem_fs_type = { 5012 .name = "tmpfs", 5013 .init_fs_context = ramfs_init_fs_context, 5014 .parameters = ramfs_fs_parameters, 5015 .kill_sb = ramfs_kill_sb, 5016 .fs_flags = FS_USERNS_MOUNT, 5017 }; 5018 5019 void __init shmem_init(void) 5020 { 5021 BUG_ON(register_filesystem(&shmem_fs_type) != 0); 5022 5023 shm_mnt = kern_mount(&shmem_fs_type); 5024 BUG_ON(IS_ERR(shm_mnt)); 5025 } 5026 5027 int shmem_unuse(unsigned int type) 5028 { 5029 return 0; 5030 } 5031 5032 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 5033 { 5034 return 0; 5035 } 5036 5037 void shmem_unlock_mapping(struct address_space *mapping) 5038 { 5039 } 5040 5041 #ifdef CONFIG_MMU 5042 unsigned long shmem_get_unmapped_area(struct file *file, 5043 unsigned long addr, unsigned long len, 5044 unsigned long pgoff, unsigned long flags) 5045 { 5046 return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); 5047 } 5048 #endif 5049 5050 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 5051 { 5052 truncate_inode_pages_range(inode->i_mapping, lstart, lend); 5053 } 5054 EXPORT_SYMBOL_GPL(shmem_truncate_range); 5055 5056 #define shmem_vm_ops generic_file_vm_ops 5057 #define shmem_anon_vm_ops generic_file_vm_ops 5058 #define shmem_file_operations ramfs_file_operations 5059 #define shmem_acct_size(flags, size) 0 5060 #define shmem_unacct_size(flags, size) do {} while (0) 5061 5062 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, 5063 struct super_block *sb, struct inode *dir, 5064 umode_t mode, dev_t dev, unsigned long flags) 5065 { 5066 struct inode *inode = ramfs_get_inode(sb, dir, mode, dev); 5067 return inode ? inode : ERR_PTR(-ENOSPC); 5068 } 5069 5070 #endif /* CONFIG_SHMEM */ 5071 5072 /* common code */ 5073 5074 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, 5075 loff_t size, unsigned long flags, unsigned int i_flags) 5076 { 5077 struct inode *inode; 5078 struct file *res; 5079 5080 if (IS_ERR(mnt)) 5081 return ERR_CAST(mnt); 5082 5083 if (size < 0 || size > MAX_LFS_FILESIZE) 5084 return ERR_PTR(-EINVAL); 5085 5086 if (shmem_acct_size(flags, size)) 5087 return ERR_PTR(-ENOMEM); 5088 5089 if (is_idmapped_mnt(mnt)) 5090 return ERR_PTR(-EINVAL); 5091 5092 inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, 5093 S_IFREG | S_IRWXUGO, 0, flags); 5094 if (IS_ERR(inode)) { 5095 shmem_unacct_size(flags, size); 5096 return ERR_CAST(inode); 5097 } 5098 inode->i_flags |= i_flags; 5099 inode->i_size = size; 5100 clear_nlink(inode); /* It is unlinked */ 5101 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 5102 if (!IS_ERR(res)) 5103 res = alloc_file_pseudo(inode, mnt, name, O_RDWR, 5104 &shmem_file_operations); 5105 if (IS_ERR(res)) 5106 iput(inode); 5107 return res; 5108 } 5109 5110 /** 5111 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 5112 * kernel internal. There will be NO LSM permission checks against the 5113 * underlying inode. So users of this interface must do LSM checks at a 5114 * higher layer. The users are the big_key and shm implementations. LSM 5115 * checks are provided at the key or shm level rather than the inode. 
5116 * @name: name for dentry (to be seen in /proc/<pid>/maps 5117 * @size: size to be set for the file 5118 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 5119 */ 5120 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) 5121 { 5122 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE); 5123 } 5124 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup); 5125 5126 /** 5127 * shmem_file_setup - get an unlinked file living in tmpfs 5128 * @name: name for dentry (to be seen in /proc/<pid>/maps 5129 * @size: size to be set for the file 5130 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 5131 */ 5132 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 5133 { 5134 return __shmem_file_setup(shm_mnt, name, size, flags, 0); 5135 } 5136 EXPORT_SYMBOL_GPL(shmem_file_setup); 5137 5138 /** 5139 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs 5140 * @mnt: the tmpfs mount where the file will be created 5141 * @name: name for dentry (to be seen in /proc/<pid>/maps 5142 * @size: size to be set for the file 5143 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 5144 */ 5145 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name, 5146 loff_t size, unsigned long flags) 5147 { 5148 return __shmem_file_setup(mnt, name, size, flags, 0); 5149 } 5150 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt); 5151 5152 /** 5153 * shmem_zero_setup - setup a shared anonymous mapping 5154 * @vma: the vma to be mmapped is prepared by do_mmap 5155 */ 5156 int shmem_zero_setup(struct vm_area_struct *vma) 5157 { 5158 struct file *file; 5159 loff_t size = vma->vm_end - vma->vm_start; 5160 5161 /* 5162 * Cloning a new file under mmap_lock leads to a lock ordering conflict 5163 * between XFS directory reading and selinux: since this file is only 5164 * accessible to the user through its mapping, use S_PRIVATE flag to 5165 * bypass file security, in the same way as shmem_kernel_file_setup(). 5166 */ 5167 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); 5168 if (IS_ERR(file)) 5169 return PTR_ERR(file); 5170 5171 if (vma->vm_file) 5172 fput(vma->vm_file); 5173 vma->vm_file = file; 5174 vma->vm_ops = &shmem_anon_vm_ops; 5175 5176 return 0; 5177 } 5178 5179 /** 5180 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags. 5181 * @mapping: the folio's address_space 5182 * @index: the folio index 5183 * @gfp: the page allocator flags to use if allocating 5184 * 5185 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 5186 * with any new page allocations done using the specified allocation flags. 5187 * But read_cache_page_gfp() uses the ->read_folio() method: which does not 5188 * suit tmpfs, since it may have pages in swapcache, and needs to find those 5189 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 5190 * 5191 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 5192 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 
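 *
 * Return: the folio on success, or an ERR_PTR() encoding the error on failure.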
5193 */ 5194 struct folio *shmem_read_folio_gfp(struct address_space *mapping, 5195 pgoff_t index, gfp_t gfp) 5196 { 5197 #ifdef CONFIG_SHMEM 5198 struct inode *inode = mapping->host; 5199 struct folio *folio; 5200 int error; 5201 5202 error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE, 5203 gfp, NULL, NULL); 5204 if (error) 5205 return ERR_PTR(error); 5206 5207 folio_unlock(folio); 5208 return folio; 5209 #else 5210 /* 5211 * The tiny !SHMEM case uses ramfs without swap 5212 */ 5213 return mapping_read_folio_gfp(mapping, index, gfp); 5214 #endif 5215 } 5216 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp); 5217 5218 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 5219 pgoff_t index, gfp_t gfp) 5220 { 5221 struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp); 5222 struct page *page; 5223 5224 if (IS_ERR(folio)) 5225 return &folio->page; 5226 5227 page = folio_file_page(folio, index); 5228 if (PageHWPoison(page)) { 5229 folio_put(folio); 5230 return ERR_PTR(-EIO); 5231 } 5232 5233 return page; 5234 } 5235 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 5236
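/*
 * Illustrative sketch only, not part of this file: one way a caller might
 * combine shmem_read_mapping_page_gfp() with a relaxed gfp mask, mirroring
 * the i915 usage described in the comment above shmem_read_folio_gfp().
 * The helper name example_read_shmem_page() is hypothetical.
 */
#if 0
static struct page *example_read_shmem_page(struct address_space *mapping,
					    pgoff_t index)
{
	/* Avoid aggressive retries and allocation-failure warnings. */
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif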