/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate and shmem_writepage communicate via inode->i_private
 * (with i_mutex making sure that it has only one user at a time):
 * we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (!pslot)
		return -ENOENT;
	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	radix_tree_replace_slot(pslot, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that. However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_replace_page_cache(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_entry(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_charge_file(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error)
				delete_from_swap_cache(page);
		}
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		__SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_charge_file(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (error)
			goto decused;
		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							gfp, NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			goto decused;
		}
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->backing_dev_info == &shmem_backing_dev_info;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
	if (ret == 0 && *pagep)
		init_page_accessed(*pagep);
	return ret;
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error = 0;
	ssize_t retval;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;
	struct iov_iter iter;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;
	iov_iter_init(&iter, iov, nr_segs, count, 0);

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, &iter);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (!iov_iter_count(&iter))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

/*
 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
					pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec, 0);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = find_get_entries(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !radix_tree_exceptional_entry(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	mutex_lock(&inode->i_mutex);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0)
		offset = -EINVAL;
	else if (offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_CACHE_SHIFT;
		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_CACHE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	mutex_unlock(&inode->i_mutex);
	return offset;
}

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.start = start;
	shmem_falloc.next = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;

		error = 0;
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
out_iput:
	iput(inode);
	return error;
}

static int
shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     NULL,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		d_tmpfile(dentry, inode);
	}
	return error;
out_iput:
	iput(inode);
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool excl)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
1932 */ 1933 ret = shmem_reserve_inode(inode->i_sb); 1934 if (ret) 1935 goto out; 1936 1937 dir->i_size += BOGO_DIRENT_SIZE; 1938 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1939 inc_nlink(inode); 1940 ihold(inode); /* New dentry reference */ 1941 dget(dentry); /* Extra pinning count for the created dentry */ 1942 d_instantiate(dentry, inode); 1943 out: 1944 return ret; 1945 } 1946 1947 static int shmem_unlink(struct inode *dir, struct dentry *dentry) 1948 { 1949 struct inode *inode = dentry->d_inode; 1950 1951 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 1952 shmem_free_inode(inode->i_sb); 1953 1954 dir->i_size -= BOGO_DIRENT_SIZE; 1955 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1956 drop_nlink(inode); 1957 dput(dentry); /* Undo the count from "create" - this does all the work */ 1958 return 0; 1959 } 1960 1961 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 1962 { 1963 if (!simple_empty(dentry)) 1964 return -ENOTEMPTY; 1965 1966 drop_nlink(dentry->d_inode); 1967 drop_nlink(dir); 1968 return shmem_unlink(dir, dentry); 1969 } 1970 1971 /* 1972 * The VFS layer already does all the dentry stuff for rename, 1973 * we just have to decrement the usage count for the target if 1974 * it exists so that the VFS layer correctly frees it when it 1975 * gets overwritten. 1976 */ 1977 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 1978 { 1979 struct inode *inode = old_dentry->d_inode; 1980 int they_are_dirs = S_ISDIR(inode->i_mode); 1981 1982 if (!simple_empty(new_dentry)) 1983 return -ENOTEMPTY; 1984 1985 if (new_dentry->d_inode) { 1986 (void) shmem_unlink(new_dir, new_dentry); 1987 if (they_are_dirs) 1988 drop_nlink(old_dir); 1989 } else if (they_are_dirs) { 1990 drop_nlink(old_dir); 1991 inc_nlink(new_dir); 1992 } 1993 1994 old_dir->i_size -= BOGO_DIRENT_SIZE; 1995 new_dir->i_size += BOGO_DIRENT_SIZE; 1996 old_dir->i_ctime = old_dir->i_mtime = 1997 new_dir->i_ctime = new_dir->i_mtime = 1998 inode->i_ctime = CURRENT_TIME; 1999 return 0; 2000 } 2001 2002 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 2003 { 2004 int error; 2005 int len; 2006 struct inode *inode; 2007 struct page *page; 2008 char *kaddr; 2009 struct shmem_inode_info *info; 2010 2011 len = strlen(symname) + 1; 2012 if (len > PAGE_CACHE_SIZE) 2013 return -ENAMETOOLONG; 2014 2015 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 2016 if (!inode) 2017 return -ENOSPC; 2018 2019 error = security_inode_init_security(inode, dir, &dentry->d_name, 2020 shmem_initxattrs, NULL); 2021 if (error) { 2022 if (error != -EOPNOTSUPP) { 2023 iput(inode); 2024 return error; 2025 } 2026 error = 0; 2027 } 2028 2029 info = SHMEM_I(inode); 2030 inode->i_size = len-1; 2031 if (len <= SHORT_SYMLINK_LEN) { 2032 info->symlink = kmemdup(symname, len, GFP_KERNEL); 2033 if (!info->symlink) { 2034 iput(inode); 2035 return -ENOMEM; 2036 } 2037 inode->i_op = &shmem_short_symlink_operations; 2038 } else { 2039 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 2040 if (error) { 2041 iput(inode); 2042 return error; 2043 } 2044 inode->i_mapping->a_ops = &shmem_aops; 2045 inode->i_op = &shmem_symlink_inode_operations; 2046 kaddr = kmap_atomic(page); 2047 memcpy(kaddr, symname, len); 2048 kunmap_atomic(kaddr); 2049 SetPageUptodate(page); 2050 set_page_dirty(page); 2051 unlock_page(page); 2052 page_cache_release(page); 2053 } 2054 dir->i_size += BOGO_DIRENT_SIZE; 2055
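	/* The same tail as shmem_mknod(): update parent timestamps, pin the new dentry */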
dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2056 d_instantiate(dentry, inode); 2057 dget(dentry); 2058 return 0; 2059 } 2060 2061 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd) 2062 { 2063 nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink); 2064 return NULL; 2065 } 2066 2067 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 2068 { 2069 struct page *page = NULL; 2070 int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 2071 nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); 2072 if (page) 2073 unlock_page(page); 2074 return page; 2075 } 2076 2077 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 2078 { 2079 if (!IS_ERR(nd_get_link(nd))) { 2080 struct page *page = cookie; 2081 kunmap(page); 2082 mark_page_accessed(page); 2083 page_cache_release(page); 2084 } 2085 } 2086 2087 #ifdef CONFIG_TMPFS_XATTR 2088 /* 2089 * Superblocks without xattr inode operations may get some security.* xattr 2090 * support from the LSM "for free". As soon as we have any other xattrs 2091 * like ACLs, we also need to implement the security.* handlers at 2092 * filesystem level, though. 2093 */ 2094 2095 /* 2096 * Callback for security_inode_init_security() for acquiring xattrs. 2097 */ 2098 static int shmem_initxattrs(struct inode *inode, 2099 const struct xattr *xattr_array, 2100 void *fs_info) 2101 { 2102 struct shmem_inode_info *info = SHMEM_I(inode); 2103 const struct xattr *xattr; 2104 struct simple_xattr *new_xattr; 2105 size_t len; 2106 2107 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 2108 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 2109 if (!new_xattr) 2110 return -ENOMEM; 2111 2112 len = strlen(xattr->name) + 1; 2113 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 2114 GFP_KERNEL); 2115 if (!new_xattr->name) { 2116 kfree(new_xattr); 2117 return -ENOMEM; 2118 } 2119 2120 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 2121 XATTR_SECURITY_PREFIX_LEN); 2122 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 2123 xattr->name, len); 2124 2125 simple_xattr_list_add(&info->xattrs, new_xattr); 2126 } 2127 2128 return 0; 2129 } 2130 2131 static const struct xattr_handler *shmem_xattr_handlers[] = { 2132 #ifdef CONFIG_TMPFS_POSIX_ACL 2133 &posix_acl_access_xattr_handler, 2134 &posix_acl_default_xattr_handler, 2135 #endif 2136 NULL 2137 }; 2138 2139 static int shmem_xattr_validate(const char *name) 2140 { 2141 struct { const char *prefix; size_t len; } arr[] = { 2142 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, 2143 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } 2144 }; 2145 int i; 2146 2147 for (i = 0; i < ARRAY_SIZE(arr); i++) { 2148 size_t preflen = arr[i].len; 2149 if (strncmp(name, arr[i].prefix, preflen) == 0) { 2150 if (!name[preflen]) 2151 return -EINVAL; 2152 return 0; 2153 } 2154 } 2155 return -EOPNOTSUPP; 2156 } 2157 2158 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name, 2159 void *buffer, size_t size) 2160 { 2161 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2162 int err; 2163 2164 /* 2165 * If this is a request for a synthetic attribute in the system.* 2166 * namespace use the generic infrastructure to resolve a handler 2167 * for it via sb->s_xattr. 
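 *
 * For instance, "system.posix_acl_access" is resolved through the
 * handlers in shmem_xattr_handlers[], while names accepted by
 * shmem_xattr_validate() ("security.*" and "trusted.*") are served
 * from the inode's simple_xattr list.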
2168 */ 2169 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2170 return generic_getxattr(dentry, name, buffer, size); 2171 2172 err = shmem_xattr_validate(name); 2173 if (err) 2174 return err; 2175 2176 return simple_xattr_get(&info->xattrs, name, buffer, size); 2177 } 2178 2179 static int shmem_setxattr(struct dentry *dentry, const char *name, 2180 const void *value, size_t size, int flags) 2181 { 2182 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2183 int err; 2184 2185 /* 2186 * If this is a request for a synthetic attribute in the system.* 2187 * namespace use the generic infrastructure to resolve a handler 2188 * for it via sb->s_xattr. 2189 */ 2190 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2191 return generic_setxattr(dentry, name, value, size, flags); 2192 2193 err = shmem_xattr_validate(name); 2194 if (err) 2195 return err; 2196 2197 return simple_xattr_set(&info->xattrs, name, value, size, flags); 2198 } 2199 2200 static int shmem_removexattr(struct dentry *dentry, const char *name) 2201 { 2202 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2203 int err; 2204 2205 /* 2206 * If this is a request for a synthetic attribute in the system.* 2207 * namespace use the generic infrastructure to resolve a handler 2208 * for it via sb->s_xattr. 2209 */ 2210 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2211 return generic_removexattr(dentry, name); 2212 2213 err = shmem_xattr_validate(name); 2214 if (err) 2215 return err; 2216 2217 return simple_xattr_remove(&info->xattrs, name); 2218 } 2219 2220 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 2221 { 2222 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2223 return simple_xattr_list(&info->xattrs, buffer, size); 2224 } 2225 #endif /* CONFIG_TMPFS_XATTR */ 2226 2227 static const struct inode_operations shmem_short_symlink_operations = { 2228 .readlink = generic_readlink, 2229 .follow_link = shmem_follow_short_symlink, 2230 #ifdef CONFIG_TMPFS_XATTR 2231 .setxattr = shmem_setxattr, 2232 .getxattr = shmem_getxattr, 2233 .listxattr = shmem_listxattr, 2234 .removexattr = shmem_removexattr, 2235 #endif 2236 }; 2237 2238 static const struct inode_operations shmem_symlink_inode_operations = { 2239 .readlink = generic_readlink, 2240 .follow_link = shmem_follow_link, 2241 .put_link = shmem_put_link, 2242 #ifdef CONFIG_TMPFS_XATTR 2243 .setxattr = shmem_setxattr, 2244 .getxattr = shmem_getxattr, 2245 .listxattr = shmem_listxattr, 2246 .removexattr = shmem_removexattr, 2247 #endif 2248 }; 2249 2250 static struct dentry *shmem_get_parent(struct dentry *child) 2251 { 2252 return ERR_PTR(-ESTALE); 2253 } 2254 2255 static int shmem_match(struct inode *ino, void *vfh) 2256 { 2257 __u32 *fh = vfh; 2258 __u64 inum = fh[2]; 2259 inum = (inum << 32) | fh[1]; 2260 return ino->i_ino == inum && fh[0] == ino->i_generation; 2261 } 2262 2263 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 2264 struct fid *fid, int fh_len, int fh_type) 2265 { 2266 struct inode *inode; 2267 struct dentry *dentry = NULL; 2268 u64 inum; 2269 2270 if (fh_len < 3) 2271 return NULL; 2272 2273 inum = fid->raw[2]; 2274 inum = (inum << 32) | fid->raw[1]; 2275 2276 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 2277 shmem_match, fid->raw); 2278 if (inode) { 2279 dentry = d_find_alias(inode); 2280 iput(inode); 2281 } 2282 2283 return dentry; 2284 } 2285 2286 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 2287 struct inode 
*parent) 2288 { 2289 if (*len < 3) { 2290 *len = 3; 2291 return FILEID_INVALID; 2292 } 2293 2294 if (inode_unhashed(inode)) { 2295 /* Unfortunately insert_inode_hash is not idempotent, 2296 * so as we hash inodes here rather than at creation 2297 * time, we need a lock to ensure we only try 2298 * to do it once 2299 */ 2300 static DEFINE_SPINLOCK(lock); 2301 spin_lock(&lock); 2302 if (inode_unhashed(inode)) 2303 __insert_inode_hash(inode, 2304 inode->i_ino + inode->i_generation); 2305 spin_unlock(&lock); 2306 } 2307 2308 fh[0] = inode->i_generation; 2309 fh[1] = inode->i_ino; 2310 fh[2] = ((__u64)inode->i_ino) >> 32; 2311 2312 *len = 3; 2313 return 1; 2314 } 2315 2316 static const struct export_operations shmem_export_ops = { 2317 .get_parent = shmem_get_parent, 2318 .encode_fh = shmem_encode_fh, 2319 .fh_to_dentry = shmem_fh_to_dentry, 2320 }; 2321 2322 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 2323 bool remount) 2324 { 2325 char *this_char, *value, *rest; 2326 struct mempolicy *mpol = NULL; 2327 uid_t uid; 2328 gid_t gid; 2329 2330 while (options != NULL) { 2331 this_char = options; 2332 for (;;) { 2333 /* 2334 * NUL-terminate this option: unfortunately, 2335 * mount options form a comma-separated list, 2336 * but mpol's nodelist may also contain commas. 2337 */ 2338 options = strchr(options, ','); 2339 if (options == NULL) 2340 break; 2341 options++; 2342 if (!isdigit(*options)) { 2343 options[-1] = '\0'; 2344 break; 2345 } 2346 } 2347 if (!*this_char) 2348 continue; 2349 if ((value = strchr(this_char,'=')) != NULL) { 2350 *value++ = 0; 2351 } else { 2352 printk(KERN_ERR 2353 "tmpfs: No value for mount option '%s'\n", 2354 this_char); 2355 goto error; 2356 } 2357 2358 if (!strcmp(this_char,"size")) { 2359 unsigned long long size; 2360 size = memparse(value,&rest); 2361 if (*rest == '%') { 2362 size <<= PAGE_SHIFT; 2363 size *= totalram_pages; 2364 do_div(size, 100); 2365 rest++; 2366 } 2367 if (*rest) 2368 goto bad_val; 2369 sbinfo->max_blocks = 2370 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2371 } else if (!strcmp(this_char,"nr_blocks")) { 2372 sbinfo->max_blocks = memparse(value, &rest); 2373 if (*rest) 2374 goto bad_val; 2375 } else if (!strcmp(this_char,"nr_inodes")) { 2376 sbinfo->max_inodes = memparse(value, &rest); 2377 if (*rest) 2378 goto bad_val; 2379 } else if (!strcmp(this_char,"mode")) { 2380 if (remount) 2381 continue; 2382 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 2383 if (*rest) 2384 goto bad_val; 2385 } else if (!strcmp(this_char,"uid")) { 2386 if (remount) 2387 continue; 2388 uid = simple_strtoul(value, &rest, 0); 2389 if (*rest) 2390 goto bad_val; 2391 sbinfo->uid = make_kuid(current_user_ns(), uid); 2392 if (!uid_valid(sbinfo->uid)) 2393 goto bad_val; 2394 } else if (!strcmp(this_char,"gid")) { 2395 if (remount) 2396 continue; 2397 gid = simple_strtoul(value, &rest, 0); 2398 if (*rest) 2399 goto bad_val; 2400 sbinfo->gid = make_kgid(current_user_ns(), gid); 2401 if (!gid_valid(sbinfo->gid)) 2402 goto bad_val; 2403 } else if (!strcmp(this_char,"mpol")) { 2404 mpol_put(mpol); 2405 mpol = NULL; 2406 if (mpol_parse_str(value, &mpol)) 2407 goto bad_val; 2408 } else { 2409 printk(KERN_ERR "tmpfs: Bad mount option %s\n", 2410 this_char); 2411 goto error; 2412 } 2413 } 2414 sbinfo->mpol = mpol; 2415 return 0; 2416 2417 bad_val: 2418 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", 2419 value, this_char); 2420 error: 2421 mpol_put(mpol); 2422 return 1; 2423 2424 } 2425 2426 static int shmem_remount_fs(struct 
super_block *sb, int *flags, char *data) 2427 { 2428 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2429 struct shmem_sb_info config = *sbinfo; 2430 unsigned long inodes; 2431 int error = -EINVAL; 2432 2433 config.mpol = NULL; 2434 if (shmem_parse_options(data, &config, true)) 2435 return error; 2436 2437 spin_lock(&sbinfo->stat_lock); 2438 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 2439 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 2440 goto out; 2441 if (config.max_inodes < inodes) 2442 goto out; 2443 /* 2444 * Those tests disallow limited->unlimited while any are in use; 2445 * but we must separately disallow unlimited->limited, because 2446 * in that case we have no record of how much is already in use. 2447 */ 2448 if (config.max_blocks && !sbinfo->max_blocks) 2449 goto out; 2450 if (config.max_inodes && !sbinfo->max_inodes) 2451 goto out; 2452 2453 error = 0; 2454 sbinfo->max_blocks = config.max_blocks; 2455 sbinfo->max_inodes = config.max_inodes; 2456 sbinfo->free_inodes = config.max_inodes - inodes; 2457 2458 /* 2459 * Preserve previous mempolicy unless mpol remount option was specified. 2460 */ 2461 if (config.mpol) { 2462 mpol_put(sbinfo->mpol); 2463 sbinfo->mpol = config.mpol; /* transfers initial ref */ 2464 } 2465 out: 2466 spin_unlock(&sbinfo->stat_lock); 2467 return error; 2468 } 2469 2470 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 2471 { 2472 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 2473 2474 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2475 seq_printf(seq, ",size=%luk", 2476 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2477 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2478 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2479 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2480 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 2481 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 2482 seq_printf(seq, ",uid=%u", 2483 from_kuid_munged(&init_user_ns, sbinfo->uid)); 2484 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 2485 seq_printf(seq, ",gid=%u", 2486 from_kgid_munged(&init_user_ns, sbinfo->gid)); 2487 shmem_show_mpol(seq, sbinfo->mpol); 2488 return 0; 2489 } 2490 #endif /* CONFIG_TMPFS */ 2491 2492 static void shmem_put_super(struct super_block *sb) 2493 { 2494 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2495 2496 percpu_counter_destroy(&sbinfo->used_blocks); 2497 mpol_put(sbinfo->mpol); 2498 kfree(sbinfo); 2499 sb->s_fs_info = NULL; 2500 } 2501 2502 int shmem_fill_super(struct super_block *sb, void *data, int silent) 2503 { 2504 struct inode *inode; 2505 struct shmem_sb_info *sbinfo; 2506 int err = -ENOMEM; 2507 2508 /* Round up to L1_CACHE_BYTES to resist false sharing */ 2509 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 2510 L1_CACHE_BYTES), GFP_KERNEL); 2511 if (!sbinfo) 2512 return -ENOMEM; 2513 2514 sbinfo->mode = S_IRWXUGO | S_ISVTX; 2515 sbinfo->uid = current_fsuid(); 2516 sbinfo->gid = current_fsgid(); 2517 sb->s_fs_info = sbinfo; 2518 2519 #ifdef CONFIG_TMPFS 2520 /* 2521 * Per default we only allow half of the physical ram per 2522 * tmpfs instance, limiting inodes to one per page of lowmem; 2523 * but the internal instance is left unlimited. 
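 *
 * (The internal instance is the MS_KERNMOUNT shm_mnt created by
 * shmem_init(), which backs SysV SHM segments and shared /dev/zero
 * mappings, so it is left without block or inode limits.)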
2524 */ 2525 if (!(sb->s_flags & MS_KERNMOUNT)) { 2526 sbinfo->max_blocks = shmem_default_max_blocks(); 2527 sbinfo->max_inodes = shmem_default_max_inodes(); 2528 if (shmem_parse_options(data, sbinfo, false)) { 2529 err = -EINVAL; 2530 goto failed; 2531 } 2532 } else { 2533 sb->s_flags |= MS_NOUSER; 2534 } 2535 sb->s_export_op = &shmem_export_ops; 2536 sb->s_flags |= MS_NOSEC; 2537 #else 2538 sb->s_flags |= MS_NOUSER; 2539 #endif 2540 2541 spin_lock_init(&sbinfo->stat_lock); 2542 if (percpu_counter_init(&sbinfo->used_blocks, 0)) 2543 goto failed; 2544 sbinfo->free_inodes = sbinfo->max_inodes; 2545 2546 sb->s_maxbytes = MAX_LFS_FILESIZE; 2547 sb->s_blocksize = PAGE_CACHE_SIZE; 2548 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 2549 sb->s_magic = TMPFS_MAGIC; 2550 sb->s_op = &shmem_ops; 2551 sb->s_time_gran = 1; 2552 #ifdef CONFIG_TMPFS_XATTR 2553 sb->s_xattr = shmem_xattr_handlers; 2554 #endif 2555 #ifdef CONFIG_TMPFS_POSIX_ACL 2556 sb->s_flags |= MS_POSIXACL; 2557 #endif 2558 2559 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 2560 if (!inode) 2561 goto failed; 2562 inode->i_uid = sbinfo->uid; 2563 inode->i_gid = sbinfo->gid; 2564 sb->s_root = d_make_root(inode); 2565 if (!sb->s_root) 2566 goto failed; 2567 return 0; 2568 2569 failed: 2570 shmem_put_super(sb); 2571 return err; 2572 } 2573 2574 static struct kmem_cache *shmem_inode_cachep; 2575 2576 static struct inode *shmem_alloc_inode(struct super_block *sb) 2577 { 2578 struct shmem_inode_info *info; 2579 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 2580 if (!info) 2581 return NULL; 2582 return &info->vfs_inode; 2583 } 2584 2585 static void shmem_destroy_callback(struct rcu_head *head) 2586 { 2587 struct inode *inode = container_of(head, struct inode, i_rcu); 2588 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 2589 } 2590 2591 static void shmem_destroy_inode(struct inode *inode) 2592 { 2593 if (S_ISREG(inode->i_mode)) 2594 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 2595 call_rcu(&inode->i_rcu, shmem_destroy_callback); 2596 } 2597 2598 static void shmem_init_inode(void *foo) 2599 { 2600 struct shmem_inode_info *info = foo; 2601 inode_init_once(&info->vfs_inode); 2602 } 2603 2604 static int shmem_init_inodecache(void) 2605 { 2606 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 2607 sizeof(struct shmem_inode_info), 2608 0, SLAB_PANIC, shmem_init_inode); 2609 return 0; 2610 } 2611 2612 static void shmem_destroy_inodecache(void) 2613 { 2614 kmem_cache_destroy(shmem_inode_cachep); 2615 } 2616 2617 static const struct address_space_operations shmem_aops = { 2618 .writepage = shmem_writepage, 2619 .set_page_dirty = __set_page_dirty_no_writeback, 2620 #ifdef CONFIG_TMPFS 2621 .write_begin = shmem_write_begin, 2622 .write_end = shmem_write_end, 2623 #endif 2624 .migratepage = migrate_page, 2625 .error_remove_page = generic_error_remove_page, 2626 }; 2627 2628 static const struct file_operations shmem_file_operations = { 2629 .mmap = shmem_mmap, 2630 #ifdef CONFIG_TMPFS 2631 .llseek = shmem_file_llseek, 2632 .read = do_sync_read, 2633 .write = do_sync_write, 2634 .aio_read = shmem_file_aio_read, 2635 .aio_write = generic_file_aio_write, 2636 .fsync = noop_fsync, 2637 .splice_read = shmem_file_splice_read, 2638 .splice_write = generic_file_splice_write, 2639 .fallocate = shmem_fallocate, 2640 #endif 2641 }; 2642 2643 static const struct inode_operations shmem_inode_operations = { 2644 .setattr = shmem_setattr, 2645 #ifdef CONFIG_TMPFS_XATTR 2646 .setxattr = shmem_setxattr, 2647 .getxattr = 
shmem_getxattr, 2648 .listxattr = shmem_listxattr, 2649 .removexattr = shmem_removexattr, 2650 .set_acl = simple_set_acl, 2651 #endif 2652 }; 2653 2654 static const struct inode_operations shmem_dir_inode_operations = { 2655 #ifdef CONFIG_TMPFS 2656 .create = shmem_create, 2657 .lookup = simple_lookup, 2658 .link = shmem_link, 2659 .unlink = shmem_unlink, 2660 .symlink = shmem_symlink, 2661 .mkdir = shmem_mkdir, 2662 .rmdir = shmem_rmdir, 2663 .mknod = shmem_mknod, 2664 .rename = shmem_rename, 2665 .tmpfile = shmem_tmpfile, 2666 #endif 2667 #ifdef CONFIG_TMPFS_XATTR 2668 .setxattr = shmem_setxattr, 2669 .getxattr = shmem_getxattr, 2670 .listxattr = shmem_listxattr, 2671 .removexattr = shmem_removexattr, 2672 #endif 2673 #ifdef CONFIG_TMPFS_POSIX_ACL 2674 .setattr = shmem_setattr, 2675 .set_acl = simple_set_acl, 2676 #endif 2677 }; 2678 2679 static const struct inode_operations shmem_special_inode_operations = { 2680 #ifdef CONFIG_TMPFS_XATTR 2681 .setxattr = shmem_setxattr, 2682 .getxattr = shmem_getxattr, 2683 .listxattr = shmem_listxattr, 2684 .removexattr = shmem_removexattr, 2685 #endif 2686 #ifdef CONFIG_TMPFS_POSIX_ACL 2687 .setattr = shmem_setattr, 2688 .set_acl = simple_set_acl, 2689 #endif 2690 }; 2691 2692 static const struct super_operations shmem_ops = { 2693 .alloc_inode = shmem_alloc_inode, 2694 .destroy_inode = shmem_destroy_inode, 2695 #ifdef CONFIG_TMPFS 2696 .statfs = shmem_statfs, 2697 .remount_fs = shmem_remount_fs, 2698 .show_options = shmem_show_options, 2699 #endif 2700 .evict_inode = shmem_evict_inode, 2701 .drop_inode = generic_delete_inode, 2702 .put_super = shmem_put_super, 2703 }; 2704 2705 static const struct vm_operations_struct shmem_vm_ops = { 2706 .fault = shmem_fault, 2707 .map_pages = filemap_map_pages, 2708 #ifdef CONFIG_NUMA 2709 .set_policy = shmem_set_policy, 2710 .get_policy = shmem_get_policy, 2711 #endif 2712 .remap_pages = generic_file_remap_pages, 2713 }; 2714 2715 static struct dentry *shmem_mount(struct file_system_type *fs_type, 2716 int flags, const char *dev_name, void *data) 2717 { 2718 return mount_nodev(fs_type, flags, data, shmem_fill_super); 2719 } 2720 2721 static struct file_system_type shmem_fs_type = { 2722 .owner = THIS_MODULE, 2723 .name = "tmpfs", 2724 .mount = shmem_mount, 2725 .kill_sb = kill_litter_super, 2726 .fs_flags = FS_USERNS_MOUNT, 2727 }; 2728 2729 int __init shmem_init(void) 2730 { 2731 int error; 2732 2733 /* If rootfs called this, don't re-init */ 2734 if (shmem_inode_cachep) 2735 return 0; 2736 2737 error = bdi_init(&shmem_backing_dev_info); 2738 if (error) 2739 goto out4; 2740 2741 error = shmem_init_inodecache(); 2742 if (error) 2743 goto out3; 2744 2745 error = register_filesystem(&shmem_fs_type); 2746 if (error) { 2747 printk(KERN_ERR "Could not register tmpfs\n"); 2748 goto out2; 2749 } 2750 2751 shm_mnt = kern_mount(&shmem_fs_type); 2752 if (IS_ERR(shm_mnt)) { 2753 error = PTR_ERR(shm_mnt); 2754 printk(KERN_ERR "Could not kern_mount tmpfs\n"); 2755 goto out1; 2756 } 2757 return 0; 2758 2759 out1: 2760 unregister_filesystem(&shmem_fs_type); 2761 out2: 2762 shmem_destroy_inodecache(); 2763 out3: 2764 bdi_destroy(&shmem_backing_dev_info); 2765 out4: 2766 shm_mnt = ERR_PTR(error); 2767 return error; 2768 } 2769 2770 #else /* !CONFIG_SHMEM */ 2771 2772 /* 2773 * tiny-shmem: simple shmemfs and tmpfs using ramfs code 2774 * 2775 * This is intended for small systems where the benefits of the full 2776 * shmem code (swap-backed and resource-limited) are outweighed by 2777 * their complexity.
On systems without swap this code should be 2778 * effectively equivalent, but much lighter weight. 2779 */ 2780 2781 static struct file_system_type shmem_fs_type = { 2782 .name = "tmpfs", 2783 .mount = ramfs_mount, 2784 .kill_sb = kill_litter_super, 2785 .fs_flags = FS_USERNS_MOUNT, 2786 }; 2787 2788 int __init shmem_init(void) 2789 { 2790 BUG_ON(register_filesystem(&shmem_fs_type) != 0); 2791 2792 shm_mnt = kern_mount(&shmem_fs_type); 2793 BUG_ON(IS_ERR(shm_mnt)); 2794 2795 return 0; 2796 } 2797 2798 int shmem_unuse(swp_entry_t swap, struct page *page) 2799 { 2800 return 0; 2801 } 2802 2803 int shmem_lock(struct file *file, int lock, struct user_struct *user) 2804 { 2805 return 0; 2806 } 2807 2808 void shmem_unlock_mapping(struct address_space *mapping) 2809 { 2810 } 2811 2812 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 2813 { 2814 truncate_inode_pages_range(inode->i_mapping, lstart, lend); 2815 } 2816 EXPORT_SYMBOL_GPL(shmem_truncate_range); 2817 2818 #define shmem_vm_ops generic_file_vm_ops 2819 #define shmem_file_operations ramfs_file_operations 2820 #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 2821 #define shmem_acct_size(flags, size) 0 2822 #define shmem_unacct_size(flags, size) do {} while (0) 2823 2824 #endif /* CONFIG_SHMEM */ 2825 2826 /* common code */ 2827 2828 static struct dentry_operations anon_ops = { 2829 .d_dname = simple_dname 2830 }; 2831 2832 static struct file *__shmem_file_setup(const char *name, loff_t size, 2833 unsigned long flags, unsigned int i_flags) 2834 { 2835 struct file *res; 2836 struct inode *inode; 2837 struct path path; 2838 struct super_block *sb; 2839 struct qstr this; 2840 2841 if (IS_ERR(shm_mnt)) 2842 return ERR_CAST(shm_mnt); 2843 2844 if (size < 0 || size > MAX_LFS_FILESIZE) 2845 return ERR_PTR(-EINVAL); 2846 2847 if (shmem_acct_size(flags, size)) 2848 return ERR_PTR(-ENOMEM); 2849 2850 res = ERR_PTR(-ENOMEM); 2851 this.name = name; 2852 this.len = strlen(name); 2853 this.hash = 0; /* will go */ 2854 sb = shm_mnt->mnt_sb; 2855 path.dentry = d_alloc_pseudo(sb, &this); 2856 if (!path.dentry) 2857 goto put_memory; 2858 d_set_d_op(path.dentry, &anon_ops); 2859 path.mnt = mntget(shm_mnt); 2860 2861 res = ERR_PTR(-ENOSPC); 2862 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 2863 if (!inode) 2864 goto put_dentry; 2865 2866 inode->i_flags |= i_flags; 2867 d_instantiate(path.dentry, inode); 2868 inode->i_size = size; 2869 clear_nlink(inode); /* It is unlinked */ 2870 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 2871 if (IS_ERR(res)) 2872 goto put_dentry; 2873 2874 res = alloc_file(&path, FMODE_WRITE | FMODE_READ, 2875 &shmem_file_operations); 2876 if (IS_ERR(res)) 2877 goto put_dentry; 2878 2879 return res; 2880 2881 put_dentry: 2882 path_put(&path); 2883 put_memory: 2884 shmem_unacct_size(flags, size); 2885 return res; 2886 } 2887 2888 /** 2889 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 2890 * kernel internal. There will be NO LSM permission checks against the 2891 * underlying inode. So users of this interface must do LSM checks at a 2892 * higher layer. The one user is the big_key implementation. LSM checks 2893 * are provided at the key level rather than the inode level. 
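 *
 * Illustrative call, modelled loosely on the big_key user (the error
 * handling shown is an assumption for the example, not copied code):
 *
 *	file = shmem_kernel_file_setup("", datalen, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *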
2894 * @name: name for dentry (to be seen in /proc/<pid>/maps) 2895 * @size: size to be set for the file 2896 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 2897 */ 2898 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) 2899 { 2900 return __shmem_file_setup(name, size, flags, S_PRIVATE); 2901 } 2902 2903 /** 2904 * shmem_file_setup - get an unlinked file living in tmpfs 2905 * @name: name for dentry (to be seen in /proc/<pid>/maps) 2906 * @size: size to be set for the file 2907 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 2908 */ 2909 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 2910 { 2911 return __shmem_file_setup(name, size, flags, 0); 2912 } 2913 EXPORT_SYMBOL_GPL(shmem_file_setup); 2914 2915 /** 2916 * shmem_zero_setup - setup a shared anonymous mapping 2917 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff 2918 */ 2919 int shmem_zero_setup(struct vm_area_struct *vma) 2920 { 2921 struct file *file; 2922 loff_t size = vma->vm_end - vma->vm_start; 2923 2924 file = shmem_file_setup("dev/zero", size, vma->vm_flags); 2925 if (IS_ERR(file)) 2926 return PTR_ERR(file); 2927 2928 if (vma->vm_file) 2929 fput(vma->vm_file); 2930 vma->vm_file = file; 2931 vma->vm_ops = &shmem_vm_ops; 2932 return 0; 2933 } 2934 2935 /** 2936 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 2937 * @mapping: the page's address_space 2938 * @index: the page index 2939 * @gfp: the page allocator flags to use if allocating 2940 * 2941 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 2942 * with any new page allocations done using the specified allocation flags. 2943 * But read_cache_page_gfp() uses the ->readpage() method: which does not 2944 * suit tmpfs, since it may have pages in swapcache, and needs to find those 2945 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 2946 * 2947 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 2948 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 2949 */ 2950 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 2951 pgoff_t index, gfp_t gfp) 2952 { 2953 #ifdef CONFIG_SHMEM 2954 struct inode *inode = mapping->host; 2955 struct page *page; 2956 int error; 2957 2958 BUG_ON(mapping->a_ops != &shmem_aops); 2959 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); 2960 if (error) 2961 page = ERR_PTR(error); 2962 else 2963 unlock_page(page); 2964 return page; 2965 #else 2966 /* 2967 * The tiny !SHMEM case uses ramfs without swap 2968 */ 2969 return read_cache_page_gfp(mapping, index, gfp); 2970 #endif 2971 } 2972 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 2973
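/*
 * Illustrative sketch, not part of the original source: one way a
 * kernel user in the i915/ttm mould might pull a single page out of a
 * shmem object with a relaxed allocation policy, as described above
 * shmem_read_mapping_page_gfp().  The helper name is invented for the
 * example; file_inode() and mapping_gfp_mask() are assumed to be the
 * usual kernel helpers of this era.
 *
 *	static struct page *example_get_shmem_page(struct file *file,
 *						    pgoff_t index)
 *	{
 *		struct address_space *mapping = file_inode(file)->i_mapping;
 *		gfp_t gfp = mapping_gfp_mask(mapping) |
 *			    __GFP_NORETRY | __GFP_NOWARN;
 *
 *		return shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	}
 *
 * On success the page is returned unlocked with a reference held, so the
 * caller drops it with page_cache_release() when done; on failure an
 * ERR_PTR() is returned instead.
 */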