/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	int	mode;		/* FALLOC_FL mode currently operating */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (!pslot)
		return -ENOENT;
	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	radix_tree_replace_slot(pslot, replacement);
	return 0;
}

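/*
 * Note on radix tree contents: when a page is swapped out, its slot in the
 * mapping's radix tree is not cleared but filled with an exceptional entry
 * encoding the swap location (swp_to_radix_entry()).  Lookups throughout
 * this file must therefore test radix_tree_exceptional_entry() and convert
 * back with radix_to_swp_entry() before treating a slot as a struct page.
 */
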
/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that.  However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

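/*
 * shmem_unuse() below is entered from the swapoff path (try_to_unuse()),
 * with the swapcache page locked and a reference held by the caller;
 * both are dropped at "out:" before returning, whether or not the swap
 * entry was found in some tmpfs inode.
 */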
/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->mode &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_replace_page_cache(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_entry(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page && sgp == SGP_WRITE)
		mark_page_accessed(page);

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_charge_file(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error)
				delete_from_swap_cache(page);
		}
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		if (sgp == SGP_WRITE)
			mark_page_accessed(page);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		__SetPageSwapBacked(page);
		__set_page_locked(page);
		if (sgp == SGP_WRITE)
			init_page_accessed(page);

		error = mem_cgroup_charge_file(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (error)
			goto decused;
		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							gfp, NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			goto decused;
		}
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int error;
	int ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
	 * faulting pages into the hole while it's being punched, and
	 * wait on i_mutex to be released if vmf->flags permits.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (!shmem_falloc ||
		    shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
		    vmf->pgoff < shmem_falloc->start ||
		    vmf->pgoff >= shmem_falloc->next)
			shmem_falloc = NULL;
		spin_unlock(&inode->i_lock);
		/*
		 * i_lock has protected us from taking shmem_falloc seriously
		 * once return from shmem_fallocate() went back up that stack.
		 * i_lock does not serialize with i_mutex at all, but it does
		 * not matter if sometimes we wait unnecessarily, or sometimes
		 * miss out on waiting: we just need to make those cases rare.
		 */
		if (shmem_falloc) {
			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				mutex_lock(&inode->i_mutex);
				mutex_unlock(&inode->i_mutex);
				return VM_FAULT_RETRY;
			}
			/* cond_resched? Leave that to GUP or return to user */
			return VM_FAULT_NOPAGE;
		}
	}

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

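/*
 * Allocate and initialise a new tmpfs inode: regular files get the shmem
 * address_space and file operations, directories the simple dir operations,
 * and anything else is set up as a special inode (symlink ops are filled in
 * later by shmem_symlink()).  VM_NORESERVE in @flags selects the per-page
 * accounting done by shmem_acct_block().
 */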
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->backing_dev_info == &shmem_backing_dev_info;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error = 0;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, to);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (!iov_iter_count(to))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

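/*
 * For lseek(SEEK_DATA)/lseek(SEEK_HOLE), shmem_seek_hole_data() below scans
 * the radix tree directly: indices with no entry, and fallocated pages that
 * are still !Uptodate, are reported as holes; present pages and swap entries
 * count as data.  Nothing is allocated just to answer the query.
 */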
/*
 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
				    pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec, 0);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = find_get_entries(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !radix_tree_exceptional_entry(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	mutex_lock(&inode->i_mutex);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0)
		offset = -EINVAL;
	else if (offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_CACHE_SHIFT;
		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_CACHE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	mutex_unlock(&inode->i_mutex);
	return offset;
}

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
		error = 0;
		goto undone;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
1898 */ 1899 static int 1900 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 1901 { 1902 struct inode *inode; 1903 int error = -ENOSPC; 1904 1905 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 1906 if (inode) { 1907 error = simple_acl_create(dir, inode); 1908 if (error) 1909 goto out_iput; 1910 error = security_inode_init_security(inode, dir, 1911 &dentry->d_name, 1912 shmem_initxattrs, NULL); 1913 if (error && error != -EOPNOTSUPP) 1914 goto out_iput; 1915 1916 error = 0; 1917 dir->i_size += BOGO_DIRENT_SIZE; 1918 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1919 d_instantiate(dentry, inode); 1920 dget(dentry); /* Extra count - pin the dentry in core */ 1921 } 1922 return error; 1923 out_iput: 1924 iput(inode); 1925 return error; 1926 } 1927 1928 static int 1929 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 1930 { 1931 struct inode *inode; 1932 int error = -ENOSPC; 1933 1934 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 1935 if (inode) { 1936 error = security_inode_init_security(inode, dir, 1937 NULL, 1938 shmem_initxattrs, NULL); 1939 if (error && error != -EOPNOTSUPP) 1940 goto out_iput; 1941 error = simple_acl_create(dir, inode); 1942 if (error) 1943 goto out_iput; 1944 d_tmpfile(dentry, inode); 1945 } 1946 return error; 1947 out_iput: 1948 iput(inode); 1949 return error; 1950 } 1951 1952 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1953 { 1954 int error; 1955 1956 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 1957 return error; 1958 inc_nlink(dir); 1959 return 0; 1960 } 1961 1962 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 1963 bool excl) 1964 { 1965 return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 1966 } 1967 1968 /* 1969 * Link a file.. 1970 */ 1971 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 1972 { 1973 struct inode *inode = old_dentry->d_inode; 1974 int ret; 1975 1976 /* 1977 * No ordinary (disk based) filesystem counts links as inodes; 1978 * but each new link needs a new dentry, pinning lowmem, and 1979 * tmpfs dentries cannot be pruned until they are unlinked. 
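	 * Each new link is therefore charged against the nr_inodes limit,
	 * which is why shmem_reserve_inode() is called below before the
	 * link count is bumped.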
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2104 d_instantiate(dentry, inode); 2105 dget(dentry); 2106 return 0; 2107 } 2108 2109 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd) 2110 { 2111 nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink); 2112 return NULL; 2113 } 2114 2115 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 2116 { 2117 struct page *page = NULL; 2118 int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 2119 nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); 2120 if (page) 2121 unlock_page(page); 2122 return page; 2123 } 2124 2125 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 2126 { 2127 if (!IS_ERR(nd_get_link(nd))) { 2128 struct page *page = cookie; 2129 kunmap(page); 2130 mark_page_accessed(page); 2131 page_cache_release(page); 2132 } 2133 } 2134 2135 #ifdef CONFIG_TMPFS_XATTR 2136 /* 2137 * Superblocks without xattr inode operations may get some security.* xattr 2138 * support from the LSM "for free". As soon as we have any other xattrs 2139 * like ACLs, we also need to implement the security.* handlers at 2140 * filesystem level, though. 2141 */ 2142 2143 /* 2144 * Callback for security_inode_init_security() for acquiring xattrs. 2145 */ 2146 static int shmem_initxattrs(struct inode *inode, 2147 const struct xattr *xattr_array, 2148 void *fs_info) 2149 { 2150 struct shmem_inode_info *info = SHMEM_I(inode); 2151 const struct xattr *xattr; 2152 struct simple_xattr *new_xattr; 2153 size_t len; 2154 2155 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 2156 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 2157 if (!new_xattr) 2158 return -ENOMEM; 2159 2160 len = strlen(xattr->name) + 1; 2161 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 2162 GFP_KERNEL); 2163 if (!new_xattr->name) { 2164 kfree(new_xattr); 2165 return -ENOMEM; 2166 } 2167 2168 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 2169 XATTR_SECURITY_PREFIX_LEN); 2170 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 2171 xattr->name, len); 2172 2173 simple_xattr_list_add(&info->xattrs, new_xattr); 2174 } 2175 2176 return 0; 2177 } 2178 2179 static const struct xattr_handler *shmem_xattr_handlers[] = { 2180 #ifdef CONFIG_TMPFS_POSIX_ACL 2181 &posix_acl_access_xattr_handler, 2182 &posix_acl_default_xattr_handler, 2183 #endif 2184 NULL 2185 }; 2186 2187 static int shmem_xattr_validate(const char *name) 2188 { 2189 struct { const char *prefix; size_t len; } arr[] = { 2190 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, 2191 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } 2192 }; 2193 int i; 2194 2195 for (i = 0; i < ARRAY_SIZE(arr); i++) { 2196 size_t preflen = arr[i].len; 2197 if (strncmp(name, arr[i].prefix, preflen) == 0) { 2198 if (!name[preflen]) 2199 return -EINVAL; 2200 return 0; 2201 } 2202 } 2203 return -EOPNOTSUPP; 2204 } 2205 2206 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name, 2207 void *buffer, size_t size) 2208 { 2209 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2210 int err; 2211 2212 /* 2213 * If this is a request for a synthetic attribute in the system.* 2214 * namespace use the generic infrastructure to resolve a handler 2215 * for it via sb->s_xattr. 
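	 *
	 * For example, "system.posix_acl_access" is resolved through the
	 * handlers in shmem_xattr_handlers, whereas "security.selinux" or
	 * "trusted.foo" fall through to the simple_xattr list below, and
	 * names such as "user.foo" are rejected by shmem_xattr_validate()
	 * with -EOPNOTSUPP.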
2216 */ 2217 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2218 return generic_getxattr(dentry, name, buffer, size); 2219 2220 err = shmem_xattr_validate(name); 2221 if (err) 2222 return err; 2223 2224 return simple_xattr_get(&info->xattrs, name, buffer, size); 2225 } 2226 2227 static int shmem_setxattr(struct dentry *dentry, const char *name, 2228 const void *value, size_t size, int flags) 2229 { 2230 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2231 int err; 2232 2233 /* 2234 * If this is a request for a synthetic attribute in the system.* 2235 * namespace use the generic infrastructure to resolve a handler 2236 * for it via sb->s_xattr. 2237 */ 2238 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2239 return generic_setxattr(dentry, name, value, size, flags); 2240 2241 err = shmem_xattr_validate(name); 2242 if (err) 2243 return err; 2244 2245 return simple_xattr_set(&info->xattrs, name, value, size, flags); 2246 } 2247 2248 static int shmem_removexattr(struct dentry *dentry, const char *name) 2249 { 2250 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2251 int err; 2252 2253 /* 2254 * If this is a request for a synthetic attribute in the system.* 2255 * namespace use the generic infrastructure to resolve a handler 2256 * for it via sb->s_xattr. 2257 */ 2258 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2259 return generic_removexattr(dentry, name); 2260 2261 err = shmem_xattr_validate(name); 2262 if (err) 2263 return err; 2264 2265 return simple_xattr_remove(&info->xattrs, name); 2266 } 2267 2268 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 2269 { 2270 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2271 return simple_xattr_list(&info->xattrs, buffer, size); 2272 } 2273 #endif /* CONFIG_TMPFS_XATTR */ 2274 2275 static const struct inode_operations shmem_short_symlink_operations = { 2276 .readlink = generic_readlink, 2277 .follow_link = shmem_follow_short_symlink, 2278 #ifdef CONFIG_TMPFS_XATTR 2279 .setxattr = shmem_setxattr, 2280 .getxattr = shmem_getxattr, 2281 .listxattr = shmem_listxattr, 2282 .removexattr = shmem_removexattr, 2283 #endif 2284 }; 2285 2286 static const struct inode_operations shmem_symlink_inode_operations = { 2287 .readlink = generic_readlink, 2288 .follow_link = shmem_follow_link, 2289 .put_link = shmem_put_link, 2290 #ifdef CONFIG_TMPFS_XATTR 2291 .setxattr = shmem_setxattr, 2292 .getxattr = shmem_getxattr, 2293 .listxattr = shmem_listxattr, 2294 .removexattr = shmem_removexattr, 2295 #endif 2296 }; 2297 2298 static struct dentry *shmem_get_parent(struct dentry *child) 2299 { 2300 return ERR_PTR(-ESTALE); 2301 } 2302 2303 static int shmem_match(struct inode *ino, void *vfh) 2304 { 2305 __u32 *fh = vfh; 2306 __u64 inum = fh[2]; 2307 inum = (inum << 32) | fh[1]; 2308 return ino->i_ino == inum && fh[0] == ino->i_generation; 2309 } 2310 2311 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 2312 struct fid *fid, int fh_len, int fh_type) 2313 { 2314 struct inode *inode; 2315 struct dentry *dentry = NULL; 2316 u64 inum; 2317 2318 if (fh_len < 3) 2319 return NULL; 2320 2321 inum = fid->raw[2]; 2322 inum = (inum << 32) | fid->raw[1]; 2323 2324 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 2325 shmem_match, fid->raw); 2326 if (inode) { 2327 dentry = d_find_alias(inode); 2328 iput(inode); 2329 } 2330 2331 return dentry; 2332 } 2333 2334 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 2335 struct inode 
*parent) 2336 { 2337 if (*len < 3) { 2338 *len = 3; 2339 return FILEID_INVALID; 2340 } 2341 2342 if (inode_unhashed(inode)) { 2343 /* Unfortunately insert_inode_hash is not idempotent, 2344 * so as we hash inodes here rather than at creation 2345 * time, we need a lock to ensure we only try 2346 * to do it once 2347 */ 2348 static DEFINE_SPINLOCK(lock); 2349 spin_lock(&lock); 2350 if (inode_unhashed(inode)) 2351 __insert_inode_hash(inode, 2352 inode->i_ino + inode->i_generation); 2353 spin_unlock(&lock); 2354 } 2355 2356 fh[0] = inode->i_generation; 2357 fh[1] = inode->i_ino; 2358 fh[2] = ((__u64)inode->i_ino) >> 32; 2359 2360 *len = 3; 2361 return 1; 2362 } 2363 2364 static const struct export_operations shmem_export_ops = { 2365 .get_parent = shmem_get_parent, 2366 .encode_fh = shmem_encode_fh, 2367 .fh_to_dentry = shmem_fh_to_dentry, 2368 }; 2369 2370 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 2371 bool remount) 2372 { 2373 char *this_char, *value, *rest; 2374 struct mempolicy *mpol = NULL; 2375 uid_t uid; 2376 gid_t gid; 2377 2378 while (options != NULL) { 2379 this_char = options; 2380 for (;;) { 2381 /* 2382 * NUL-terminate this option: unfortunately, 2383 * mount options form a comma-separated list, 2384 * but mpol's nodelist may also contain commas. 2385 */ 2386 options = strchr(options, ','); 2387 if (options == NULL) 2388 break; 2389 options++; 2390 if (!isdigit(*options)) { 2391 options[-1] = '\0'; 2392 break; 2393 } 2394 } 2395 if (!*this_char) 2396 continue; 2397 if ((value = strchr(this_char,'=')) != NULL) { 2398 *value++ = 0; 2399 } else { 2400 printk(KERN_ERR 2401 "tmpfs: No value for mount option '%s'\n", 2402 this_char); 2403 goto error; 2404 } 2405 2406 if (!strcmp(this_char,"size")) { 2407 unsigned long long size; 2408 size = memparse(value,&rest); 2409 if (*rest == '%') { 2410 size <<= PAGE_SHIFT; 2411 size *= totalram_pages; 2412 do_div(size, 100); 2413 rest++; 2414 } 2415 if (*rest) 2416 goto bad_val; 2417 sbinfo->max_blocks = 2418 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2419 } else if (!strcmp(this_char,"nr_blocks")) { 2420 sbinfo->max_blocks = memparse(value, &rest); 2421 if (*rest) 2422 goto bad_val; 2423 } else if (!strcmp(this_char,"nr_inodes")) { 2424 sbinfo->max_inodes = memparse(value, &rest); 2425 if (*rest) 2426 goto bad_val; 2427 } else if (!strcmp(this_char,"mode")) { 2428 if (remount) 2429 continue; 2430 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 2431 if (*rest) 2432 goto bad_val; 2433 } else if (!strcmp(this_char,"uid")) { 2434 if (remount) 2435 continue; 2436 uid = simple_strtoul(value, &rest, 0); 2437 if (*rest) 2438 goto bad_val; 2439 sbinfo->uid = make_kuid(current_user_ns(), uid); 2440 if (!uid_valid(sbinfo->uid)) 2441 goto bad_val; 2442 } else if (!strcmp(this_char,"gid")) { 2443 if (remount) 2444 continue; 2445 gid = simple_strtoul(value, &rest, 0); 2446 if (*rest) 2447 goto bad_val; 2448 sbinfo->gid = make_kgid(current_user_ns(), gid); 2449 if (!gid_valid(sbinfo->gid)) 2450 goto bad_val; 2451 } else if (!strcmp(this_char,"mpol")) { 2452 mpol_put(mpol); 2453 mpol = NULL; 2454 if (mpol_parse_str(value, &mpol)) 2455 goto bad_val; 2456 } else { 2457 printk(KERN_ERR "tmpfs: Bad mount option %s\n", 2458 this_char); 2459 goto error; 2460 } 2461 } 2462 sbinfo->mpol = mpol; 2463 return 0; 2464 2465 bad_val: 2466 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", 2467 value, this_char); 2468 error: 2469 mpol_put(mpol); 2470 return 1; 2471 2472 } 2473 2474 static int shmem_remount_fs(struct 
super_block *sb, int *flags, char *data) 2475 { 2476 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2477 struct shmem_sb_info config = *sbinfo; 2478 unsigned long inodes; 2479 int error = -EINVAL; 2480 2481 config.mpol = NULL; 2482 if (shmem_parse_options(data, &config, true)) 2483 return error; 2484 2485 spin_lock(&sbinfo->stat_lock); 2486 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 2487 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 2488 goto out; 2489 if (config.max_inodes < inodes) 2490 goto out; 2491 /* 2492 * Those tests disallow limited->unlimited while any are in use; 2493 * but we must separately disallow unlimited->limited, because 2494 * in that case we have no record of how much is already in use. 2495 */ 2496 if (config.max_blocks && !sbinfo->max_blocks) 2497 goto out; 2498 if (config.max_inodes && !sbinfo->max_inodes) 2499 goto out; 2500 2501 error = 0; 2502 sbinfo->max_blocks = config.max_blocks; 2503 sbinfo->max_inodes = config.max_inodes; 2504 sbinfo->free_inodes = config.max_inodes - inodes; 2505 2506 /* 2507 * Preserve previous mempolicy unless mpol remount option was specified. 2508 */ 2509 if (config.mpol) { 2510 mpol_put(sbinfo->mpol); 2511 sbinfo->mpol = config.mpol; /* transfers initial ref */ 2512 } 2513 out: 2514 spin_unlock(&sbinfo->stat_lock); 2515 return error; 2516 } 2517 2518 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 2519 { 2520 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 2521 2522 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2523 seq_printf(seq, ",size=%luk", 2524 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2525 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2526 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2527 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2528 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 2529 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 2530 seq_printf(seq, ",uid=%u", 2531 from_kuid_munged(&init_user_ns, sbinfo->uid)); 2532 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 2533 seq_printf(seq, ",gid=%u", 2534 from_kgid_munged(&init_user_ns, sbinfo->gid)); 2535 shmem_show_mpol(seq, sbinfo->mpol); 2536 return 0; 2537 } 2538 #endif /* CONFIG_TMPFS */ 2539 2540 static void shmem_put_super(struct super_block *sb) 2541 { 2542 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2543 2544 percpu_counter_destroy(&sbinfo->used_blocks); 2545 mpol_put(sbinfo->mpol); 2546 kfree(sbinfo); 2547 sb->s_fs_info = NULL; 2548 } 2549 2550 int shmem_fill_super(struct super_block *sb, void *data, int silent) 2551 { 2552 struct inode *inode; 2553 struct shmem_sb_info *sbinfo; 2554 int err = -ENOMEM; 2555 2556 /* Round up to L1_CACHE_BYTES to resist false sharing */ 2557 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 2558 L1_CACHE_BYTES), GFP_KERNEL); 2559 if (!sbinfo) 2560 return -ENOMEM; 2561 2562 sbinfo->mode = S_IRWXUGO | S_ISVTX; 2563 sbinfo->uid = current_fsuid(); 2564 sbinfo->gid = current_fsgid(); 2565 sb->s_fs_info = sbinfo; 2566 2567 #ifdef CONFIG_TMPFS 2568 /* 2569 * Per default we only allow half of the physical ram per 2570 * tmpfs instance, limiting inodes to one per page of lowmem; 2571 * but the internal instance is left unlimited. 
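	 *
	 * An explicit mount with options such as
	 * "size=50%,nr_inodes=100k,mode=1777" overrides these defaults
	 * through the shmem_parse_options() call below.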
2572 */ 2573 if (!(sb->s_flags & MS_KERNMOUNT)) { 2574 sbinfo->max_blocks = shmem_default_max_blocks(); 2575 sbinfo->max_inodes = shmem_default_max_inodes(); 2576 if (shmem_parse_options(data, sbinfo, false)) { 2577 err = -EINVAL; 2578 goto failed; 2579 } 2580 } else { 2581 sb->s_flags |= MS_NOUSER; 2582 } 2583 sb->s_export_op = &shmem_export_ops; 2584 sb->s_flags |= MS_NOSEC; 2585 #else 2586 sb->s_flags |= MS_NOUSER; 2587 #endif 2588 2589 spin_lock_init(&sbinfo->stat_lock); 2590 if (percpu_counter_init(&sbinfo->used_blocks, 0)) 2591 goto failed; 2592 sbinfo->free_inodes = sbinfo->max_inodes; 2593 2594 sb->s_maxbytes = MAX_LFS_FILESIZE; 2595 sb->s_blocksize = PAGE_CACHE_SIZE; 2596 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 2597 sb->s_magic = TMPFS_MAGIC; 2598 sb->s_op = &shmem_ops; 2599 sb->s_time_gran = 1; 2600 #ifdef CONFIG_TMPFS_XATTR 2601 sb->s_xattr = shmem_xattr_handlers; 2602 #endif 2603 #ifdef CONFIG_TMPFS_POSIX_ACL 2604 sb->s_flags |= MS_POSIXACL; 2605 #endif 2606 2607 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 2608 if (!inode) 2609 goto failed; 2610 inode->i_uid = sbinfo->uid; 2611 inode->i_gid = sbinfo->gid; 2612 sb->s_root = d_make_root(inode); 2613 if (!sb->s_root) 2614 goto failed; 2615 return 0; 2616 2617 failed: 2618 shmem_put_super(sb); 2619 return err; 2620 } 2621 2622 static struct kmem_cache *shmem_inode_cachep; 2623 2624 static struct inode *shmem_alloc_inode(struct super_block *sb) 2625 { 2626 struct shmem_inode_info *info; 2627 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 2628 if (!info) 2629 return NULL; 2630 return &info->vfs_inode; 2631 } 2632 2633 static void shmem_destroy_callback(struct rcu_head *head) 2634 { 2635 struct inode *inode = container_of(head, struct inode, i_rcu); 2636 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 2637 } 2638 2639 static void shmem_destroy_inode(struct inode *inode) 2640 { 2641 if (S_ISREG(inode->i_mode)) 2642 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 2643 call_rcu(&inode->i_rcu, shmem_destroy_callback); 2644 } 2645 2646 static void shmem_init_inode(void *foo) 2647 { 2648 struct shmem_inode_info *info = foo; 2649 inode_init_once(&info->vfs_inode); 2650 } 2651 2652 static int shmem_init_inodecache(void) 2653 { 2654 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 2655 sizeof(struct shmem_inode_info), 2656 0, SLAB_PANIC, shmem_init_inode); 2657 return 0; 2658 } 2659 2660 static void shmem_destroy_inodecache(void) 2661 { 2662 kmem_cache_destroy(shmem_inode_cachep); 2663 } 2664 2665 static const struct address_space_operations shmem_aops = { 2666 .writepage = shmem_writepage, 2667 .set_page_dirty = __set_page_dirty_no_writeback, 2668 #ifdef CONFIG_TMPFS 2669 .write_begin = shmem_write_begin, 2670 .write_end = shmem_write_end, 2671 #endif 2672 .migratepage = migrate_page, 2673 .error_remove_page = generic_error_remove_page, 2674 }; 2675 2676 static const struct file_operations shmem_file_operations = { 2677 .mmap = shmem_mmap, 2678 #ifdef CONFIG_TMPFS 2679 .llseek = shmem_file_llseek, 2680 .read = new_sync_read, 2681 .write = new_sync_write, 2682 .read_iter = shmem_file_read_iter, 2683 .write_iter = generic_file_write_iter, 2684 .fsync = noop_fsync, 2685 .splice_read = shmem_file_splice_read, 2686 .splice_write = iter_file_splice_write, 2687 .fallocate = shmem_fallocate, 2688 #endif 2689 }; 2690 2691 static const struct inode_operations shmem_inode_operations = { 2692 .setattr = shmem_setattr, 2693 #ifdef CONFIG_TMPFS_XATTR 2694 .setxattr = shmem_setxattr, 2695 
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
	.remap_pages	= generic_file_remap_pages,
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

	/* If rootfs called this, don't re-init */
	if (shmem_inode_cachep)
		return 0;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity.
On systems without swap this code should be 2826 * effectively equivalent, but much lighter weight. 2827 */ 2828 2829 static struct file_system_type shmem_fs_type = { 2830 .name = "tmpfs", 2831 .mount = ramfs_mount, 2832 .kill_sb = kill_litter_super, 2833 .fs_flags = FS_USERNS_MOUNT, 2834 }; 2835 2836 int __init shmem_init(void) 2837 { 2838 BUG_ON(register_filesystem(&shmem_fs_type) != 0); 2839 2840 shm_mnt = kern_mount(&shmem_fs_type); 2841 BUG_ON(IS_ERR(shm_mnt)); 2842 2843 return 0; 2844 } 2845 2846 int shmem_unuse(swp_entry_t swap, struct page *page) 2847 { 2848 return 0; 2849 } 2850 2851 int shmem_lock(struct file *file, int lock, struct user_struct *user) 2852 { 2853 return 0; 2854 } 2855 2856 void shmem_unlock_mapping(struct address_space *mapping) 2857 { 2858 } 2859 2860 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 2861 { 2862 truncate_inode_pages_range(inode->i_mapping, lstart, lend); 2863 } 2864 EXPORT_SYMBOL_GPL(shmem_truncate_range); 2865 2866 #define shmem_vm_ops generic_file_vm_ops 2867 #define shmem_file_operations ramfs_file_operations 2868 #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 2869 #define shmem_acct_size(flags, size) 0 2870 #define shmem_unacct_size(flags, size) do {} while (0) 2871 2872 #endif /* CONFIG_SHMEM */ 2873 2874 /* common code */ 2875 2876 static struct dentry_operations anon_ops = { 2877 .d_dname = simple_dname 2878 }; 2879 2880 static struct file *__shmem_file_setup(const char *name, loff_t size, 2881 unsigned long flags, unsigned int i_flags) 2882 { 2883 struct file *res; 2884 struct inode *inode; 2885 struct path path; 2886 struct super_block *sb; 2887 struct qstr this; 2888 2889 if (IS_ERR(shm_mnt)) 2890 return ERR_CAST(shm_mnt); 2891 2892 if (size < 0 || size > MAX_LFS_FILESIZE) 2893 return ERR_PTR(-EINVAL); 2894 2895 if (shmem_acct_size(flags, size)) 2896 return ERR_PTR(-ENOMEM); 2897 2898 res = ERR_PTR(-ENOMEM); 2899 this.name = name; 2900 this.len = strlen(name); 2901 this.hash = 0; /* will go */ 2902 sb = shm_mnt->mnt_sb; 2903 path.dentry = d_alloc_pseudo(sb, &this); 2904 if (!path.dentry) 2905 goto put_memory; 2906 d_set_d_op(path.dentry, &anon_ops); 2907 path.mnt = mntget(shm_mnt); 2908 2909 res = ERR_PTR(-ENOSPC); 2910 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 2911 if (!inode) 2912 goto put_dentry; 2913 2914 inode->i_flags |= i_flags; 2915 d_instantiate(path.dentry, inode); 2916 inode->i_size = size; 2917 clear_nlink(inode); /* It is unlinked */ 2918 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 2919 if (IS_ERR(res)) 2920 goto put_dentry; 2921 2922 res = alloc_file(&path, FMODE_WRITE | FMODE_READ, 2923 &shmem_file_operations); 2924 if (IS_ERR(res)) 2925 goto put_dentry; 2926 2927 return res; 2928 2929 put_dentry: 2930 path_put(&path); 2931 put_memory: 2932 shmem_unacct_size(flags, size); 2933 return res; 2934 } 2935 2936 /** 2937 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 2938 * kernel internal. There will be NO LSM permission checks against the 2939 * underlying inode. So users of this interface must do LSM checks at a 2940 * higher layer. The one user is the big_key implementation. LSM checks 2941 * are provided at the key level rather than the inode level. 
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
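
/*
 * Rough usage sketch (illustrative only; grab_object_page() is a made-up
 * helper, not part of this file): a driver holding a shmem-backed object
 * might populate pages one at a time like this, mixing in
 * __GFP_NORETRY | __GFP_NOWARN as i915 does to avoid needless OOM kills.
 * The page comes back unlocked with a reference held, so the caller must
 * page_cache_release() it when finished:
 *
 *	static struct page *grab_object_page(struct address_space *mapping,
 *					     pgoff_t index)
 *	{
 *		gfp_t gfp = mapping_gfp_mask(mapping) |
 *			    __GFP_NORETRY | __GFP_NOWARN;
 *
 *		return shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	}
 */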