/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

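/*
 * Reserve one inode against the superblock's max_inodes limit, if one is
 * configured; returns -ENOSPC once the limit is reached.  The reservation
 * is given back with shmem_free_inode() when the inode (or the failed
 * attempt to create one) goes away.
 */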
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (!pslot)
		return -ENOENT;
	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	radix_tree_replace_slot(pslot, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

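/*
 * Note on the structure of shmem_undo_range() below: it makes two passes
 * over the range.  The first pass uses trylock_page() and simply skips
 * anything contended; the second pass takes the page lock and retries any
 * entry which raced with swapin or swapout, so nothing in the range is
 * left behind.
 */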
/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

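/*
 * Notify of an attribute change.  For ATTR_SIZE this adjusts the
 * pre-accounted memory charge (for objects accounted up front), updates
 * i_size, and truncates beyond the new EOF, unmapping a second time to
 * catch racily COWed private pages.
 */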
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return -EAGAIN;	/* tell shmem_unuse we found nothing */

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that.  However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	struct mem_cgroup *memcg;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */
	error = -EAGAIN;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			error = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (error != -EAGAIN)
			break;
		/* found nothing in this: move on to search the next */
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (error) {
		if (error != -ENOMEM)
			error = 0;
		mem_cgroup_cancel_charge(page, memcg);
	} else
		mem_cgroup_commit_charge(page, memcg, true);
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

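/*
 * NUMA mempolicy support: page allocation and swapin honour the shared
 * policy attached to the inode by constructing a throwaway pseudo-vma on
 * the stack, so the generic alloc_page_vma()/swapin_readahead() paths can
 * be reused unchanged.
 */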
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_migrate(oldpage, newpage, false);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct mem_cgroup *memcg;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_entry(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page && sgp == SGP_WRITE)
		mark_page_accessed(page);

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error) {
				mem_cgroup_cancel_charge(page, memcg);
				delete_from_swap_cache(page);
			}
		}
		if (error)
			goto failed;

		mem_cgroup_commit_charge(page, memcg, true);

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		if (sgp == SGP_WRITE)
			mark_page_accessed(page);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		__SetPageSwapBacked(page);
		__set_page_locked(page);
		if (sgp == SGP_WRITE)
			__SetPageReferenced(page);

		error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
		if (error)
			goto decused;
		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_cancel_charge(page, memcg);
			goto decused;
		}
		mem_cgroup_commit_charge(page, memcg, false);
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}

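/*
 * Fault in a page for a shared mapping: essentially a wrapper around
 * shmem_getpage(), plus the careful dance below which keeps faults from
 * repopulating a hole while shmem_fallocate() is busy punching it out.
 */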
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int error;
	int ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
	 * faulting pages into the hole while it's being punched.  Although
	 * shmem_undo_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_mutex in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    shmem_falloc->waitq &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT(shmem_fault_wait);

			ret = VM_FAULT_NOPAGE;
			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* It's polite to up mmap_sem if we can */
				up_read(&vma->vm_mm->mmap_sem);
				ret = VM_FAULT_RETRY;
			}

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();

			/*
			 * shmem_falloc_waitq points into the shmem_fallocate()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);
			return ret;
		}
		spin_unlock(&inode->i_lock);
	}

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

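/*
 * Allocate and initialise a new shmem/tmpfs inode of the given type,
 * charged against the superblock's inode limit.
 */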
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->backing_dev_info == &shmem_backing_dev_info;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

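/*
 * ->write_begin/->write_end for tmpfs: nothing to allocate or journal
 * beyond getting the page itself; a short copy into a !Uptodate page is
 * zero-filled around in write_end before the page is marked uptodate.
 */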
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

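/*
 * Read into the iov_iter straight from the page cache; holes are copied
 * from the zero page (with SGP_READ), so a sparse read does not allocate
 * memory, except for the stacking-filesystem case handled below.
 */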
static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error = 0;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, to);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (!iov_iter_count(to))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}

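/*
 * splice() read support, closely modelled on the generic page-cache
 * splice path, but using shmem_getpage(SGP_CACHE) to populate any pages
 * not already present rather than ->readpage.
 */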
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

/*
 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
				    pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec, 0);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = find_get_entries(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !radix_tree_exceptional_entry(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	mutex_lock(&inode->i_mutex);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0)
		offset = -EINVAL;
	else if (offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_CACHE_SHIFT;
		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_CACHE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	mutex_unlock(&inode->i_mutex);
	return offset;
}

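/*
 * Preallocate a range of the file or, with FALLOC_FL_PUNCH_HOLE, throw it
 * away.  Hole-punching serialises against page faults through the
 * shmem_falloc structure hung off inode->i_private, and a partially
 * completed preallocation is undone if it fails part-way.
 */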
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&shmem_falloc_waitq);
		spin_unlock(&inode->i_lock);
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.waitq = NULL;
	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}

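/* Report block and inode limits (if configured) for statfs(2). */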
1952 */ 1953 static int 1954 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 1955 { 1956 struct inode *inode; 1957 int error = -ENOSPC; 1958 1959 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 1960 if (inode) { 1961 error = simple_acl_create(dir, inode); 1962 if (error) 1963 goto out_iput; 1964 error = security_inode_init_security(inode, dir, 1965 &dentry->d_name, 1966 shmem_initxattrs, NULL); 1967 if (error && error != -EOPNOTSUPP) 1968 goto out_iput; 1969 1970 error = 0; 1971 dir->i_size += BOGO_DIRENT_SIZE; 1972 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1973 d_instantiate(dentry, inode); 1974 dget(dentry); /* Extra count - pin the dentry in core */ 1975 } 1976 return error; 1977 out_iput: 1978 iput(inode); 1979 return error; 1980 } 1981 1982 static int 1983 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 1984 { 1985 struct inode *inode; 1986 int error = -ENOSPC; 1987 1988 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 1989 if (inode) { 1990 error = security_inode_init_security(inode, dir, 1991 NULL, 1992 shmem_initxattrs, NULL); 1993 if (error && error != -EOPNOTSUPP) 1994 goto out_iput; 1995 error = simple_acl_create(dir, inode); 1996 if (error) 1997 goto out_iput; 1998 d_tmpfile(dentry, inode); 1999 } 2000 return error; 2001 out_iput: 2002 iput(inode); 2003 return error; 2004 } 2005 2006 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 2007 { 2008 int error; 2009 2010 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 2011 return error; 2012 inc_nlink(dir); 2013 return 0; 2014 } 2015 2016 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2017 bool excl) 2018 { 2019 return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 2020 } 2021 2022 /* 2023 * Link a file.. 2024 */ 2025 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 2026 { 2027 struct inode *inode = old_dentry->d_inode; 2028 int ret; 2029 2030 /* 2031 * No ordinary (disk based) filesystem counts links as inodes; 2032 * but each new link needs a new dentry, pinning lowmem, and 2033 * tmpfs dentries cannot be pruned until they are unlinked. 
2034 */ 2035 ret = shmem_reserve_inode(inode->i_sb); 2036 if (ret) 2037 goto out; 2038 2039 dir->i_size += BOGO_DIRENT_SIZE; 2040 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2041 inc_nlink(inode); 2042 ihold(inode); /* New dentry reference */ 2043 dget(dentry); /* Extra pinning count for the created dentry */ 2044 d_instantiate(dentry, inode); 2045 out: 2046 return ret; 2047 } 2048 2049 static int shmem_unlink(struct inode *dir, struct dentry *dentry) 2050 { 2051 struct inode *inode = dentry->d_inode; 2052 2053 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 2054 shmem_free_inode(inode->i_sb); 2055 2056 dir->i_size -= BOGO_DIRENT_SIZE; 2057 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2058 drop_nlink(inode); 2059 dput(dentry); /* Undo the count from "create" - this does all the work */ 2060 return 0; 2061 } 2062 2063 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 2064 { 2065 if (!simple_empty(dentry)) 2066 return -ENOTEMPTY; 2067 2068 drop_nlink(dentry->d_inode); 2069 drop_nlink(dir); 2070 return shmem_unlink(dir, dentry); 2071 } 2072 2073 /* 2074 * The VFS layer already does all the dentry stuff for rename, 2075 * we just have to decrement the usage count for the target if 2076 * it exists so that the VFS layer correctly free's it when it 2077 * gets overwritten. 2078 */ 2079 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 2080 { 2081 struct inode *inode = old_dentry->d_inode; 2082 int they_are_dirs = S_ISDIR(inode->i_mode); 2083 2084 if (!simple_empty(new_dentry)) 2085 return -ENOTEMPTY; 2086 2087 if (new_dentry->d_inode) { 2088 (void) shmem_unlink(new_dir, new_dentry); 2089 if (they_are_dirs) 2090 drop_nlink(old_dir); 2091 } else if (they_are_dirs) { 2092 drop_nlink(old_dir); 2093 inc_nlink(new_dir); 2094 } 2095 2096 old_dir->i_size -= BOGO_DIRENT_SIZE; 2097 new_dir->i_size += BOGO_DIRENT_SIZE; 2098 old_dir->i_ctime = old_dir->i_mtime = 2099 new_dir->i_ctime = new_dir->i_mtime = 2100 inode->i_ctime = CURRENT_TIME; 2101 return 0; 2102 } 2103 2104 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 2105 { 2106 int error; 2107 int len; 2108 struct inode *inode; 2109 struct page *page; 2110 char *kaddr; 2111 struct shmem_inode_info *info; 2112 2113 len = strlen(symname) + 1; 2114 if (len > PAGE_CACHE_SIZE) 2115 return -ENAMETOOLONG; 2116 2117 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 2118 if (!inode) 2119 return -ENOSPC; 2120 2121 error = security_inode_init_security(inode, dir, &dentry->d_name, 2122 shmem_initxattrs, NULL); 2123 if (error) { 2124 if (error != -EOPNOTSUPP) { 2125 iput(inode); 2126 return error; 2127 } 2128 error = 0; 2129 } 2130 2131 info = SHMEM_I(inode); 2132 inode->i_size = len-1; 2133 if (len <= SHORT_SYMLINK_LEN) { 2134 info->symlink = kmemdup(symname, len, GFP_KERNEL); 2135 if (!info->symlink) { 2136 iput(inode); 2137 return -ENOMEM; 2138 } 2139 inode->i_op = &shmem_short_symlink_operations; 2140 } else { 2141 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 2142 if (error) { 2143 iput(inode); 2144 return error; 2145 } 2146 inode->i_mapping->a_ops = &shmem_aops; 2147 inode->i_op = &shmem_symlink_inode_operations; 2148 kaddr = kmap_atomic(page); 2149 memcpy(kaddr, symname, len); 2150 kunmap_atomic(kaddr); 2151 SetPageUptodate(page); 2152 set_page_dirty(page); 2153 unlock_page(page); 2154 page_cache_release(page); 2155 } 2156 dir->i_size += BOGO_DIRENT_SIZE; 2157 
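	/*
	 * As in shmem_mknod() and shmem_link() above, the new entry only
	 * bumps the parent directory's nominal size and timestamps, then
	 * instantiates the dentry and takes an extra reference so it stays
	 * pinned in core.
	 */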
dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2158 d_instantiate(dentry, inode); 2159 dget(dentry); 2160 return 0; 2161 } 2162 2163 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd) 2164 { 2165 nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink); 2166 return NULL; 2167 } 2168 2169 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 2170 { 2171 struct page *page = NULL; 2172 int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 2173 nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); 2174 if (page) 2175 unlock_page(page); 2176 return page; 2177 } 2178 2179 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 2180 { 2181 if (!IS_ERR(nd_get_link(nd))) { 2182 struct page *page = cookie; 2183 kunmap(page); 2184 mark_page_accessed(page); 2185 page_cache_release(page); 2186 } 2187 } 2188 2189 #ifdef CONFIG_TMPFS_XATTR 2190 /* 2191 * Superblocks without xattr inode operations may get some security.* xattr 2192 * support from the LSM "for free". As soon as we have any other xattrs 2193 * like ACLs, we also need to implement the security.* handlers at 2194 * filesystem level, though. 2195 */ 2196 2197 /* 2198 * Callback for security_inode_init_security() for acquiring xattrs. 2199 */ 2200 static int shmem_initxattrs(struct inode *inode, 2201 const struct xattr *xattr_array, 2202 void *fs_info) 2203 { 2204 struct shmem_inode_info *info = SHMEM_I(inode); 2205 const struct xattr *xattr; 2206 struct simple_xattr *new_xattr; 2207 size_t len; 2208 2209 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 2210 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 2211 if (!new_xattr) 2212 return -ENOMEM; 2213 2214 len = strlen(xattr->name) + 1; 2215 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 2216 GFP_KERNEL); 2217 if (!new_xattr->name) { 2218 kfree(new_xattr); 2219 return -ENOMEM; 2220 } 2221 2222 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 2223 XATTR_SECURITY_PREFIX_LEN); 2224 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 2225 xattr->name, len); 2226 2227 simple_xattr_list_add(&info->xattrs, new_xattr); 2228 } 2229 2230 return 0; 2231 } 2232 2233 static const struct xattr_handler *shmem_xattr_handlers[] = { 2234 #ifdef CONFIG_TMPFS_POSIX_ACL 2235 &posix_acl_access_xattr_handler, 2236 &posix_acl_default_xattr_handler, 2237 #endif 2238 NULL 2239 }; 2240 2241 static int shmem_xattr_validate(const char *name) 2242 { 2243 struct { const char *prefix; size_t len; } arr[] = { 2244 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, 2245 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } 2246 }; 2247 int i; 2248 2249 for (i = 0; i < ARRAY_SIZE(arr); i++) { 2250 size_t preflen = arr[i].len; 2251 if (strncmp(name, arr[i].prefix, preflen) == 0) { 2252 if (!name[preflen]) 2253 return -EINVAL; 2254 return 0; 2255 } 2256 } 2257 return -EOPNOTSUPP; 2258 } 2259 2260 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name, 2261 void *buffer, size_t size) 2262 { 2263 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2264 int err; 2265 2266 /* 2267 * If this is a request for a synthetic attribute in the system.* 2268 * namespace use the generic infrastructure to resolve a handler 2269 * for it via sb->s_xattr. 
2270 */ 2271 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2272 return generic_getxattr(dentry, name, buffer, size); 2273 2274 err = shmem_xattr_validate(name); 2275 if (err) 2276 return err; 2277 2278 return simple_xattr_get(&info->xattrs, name, buffer, size); 2279 } 2280 2281 static int shmem_setxattr(struct dentry *dentry, const char *name, 2282 const void *value, size_t size, int flags) 2283 { 2284 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2285 int err; 2286 2287 /* 2288 * If this is a request for a synthetic attribute in the system.* 2289 * namespace use the generic infrastructure to resolve a handler 2290 * for it via sb->s_xattr. 2291 */ 2292 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2293 return generic_setxattr(dentry, name, value, size, flags); 2294 2295 err = shmem_xattr_validate(name); 2296 if (err) 2297 return err; 2298 2299 return simple_xattr_set(&info->xattrs, name, value, size, flags); 2300 } 2301 2302 static int shmem_removexattr(struct dentry *dentry, const char *name) 2303 { 2304 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2305 int err; 2306 2307 /* 2308 * If this is a request for a synthetic attribute in the system.* 2309 * namespace use the generic infrastructure to resolve a handler 2310 * for it via sb->s_xattr. 2311 */ 2312 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2313 return generic_removexattr(dentry, name); 2314 2315 err = shmem_xattr_validate(name); 2316 if (err) 2317 return err; 2318 2319 return simple_xattr_remove(&info->xattrs, name); 2320 } 2321 2322 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 2323 { 2324 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2325 return simple_xattr_list(&info->xattrs, buffer, size); 2326 } 2327 #endif /* CONFIG_TMPFS_XATTR */ 2328 2329 static const struct inode_operations shmem_short_symlink_operations = { 2330 .readlink = generic_readlink, 2331 .follow_link = shmem_follow_short_symlink, 2332 #ifdef CONFIG_TMPFS_XATTR 2333 .setxattr = shmem_setxattr, 2334 .getxattr = shmem_getxattr, 2335 .listxattr = shmem_listxattr, 2336 .removexattr = shmem_removexattr, 2337 #endif 2338 }; 2339 2340 static const struct inode_operations shmem_symlink_inode_operations = { 2341 .readlink = generic_readlink, 2342 .follow_link = shmem_follow_link, 2343 .put_link = shmem_put_link, 2344 #ifdef CONFIG_TMPFS_XATTR 2345 .setxattr = shmem_setxattr, 2346 .getxattr = shmem_getxattr, 2347 .listxattr = shmem_listxattr, 2348 .removexattr = shmem_removexattr, 2349 #endif 2350 }; 2351 2352 static struct dentry *shmem_get_parent(struct dentry *child) 2353 { 2354 return ERR_PTR(-ESTALE); 2355 } 2356 2357 static int shmem_match(struct inode *ino, void *vfh) 2358 { 2359 __u32 *fh = vfh; 2360 __u64 inum = fh[2]; 2361 inum = (inum << 32) | fh[1]; 2362 return ino->i_ino == inum && fh[0] == ino->i_generation; 2363 } 2364 2365 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 2366 struct fid *fid, int fh_len, int fh_type) 2367 { 2368 struct inode *inode; 2369 struct dentry *dentry = NULL; 2370 u64 inum; 2371 2372 if (fh_len < 3) 2373 return NULL; 2374 2375 inum = fid->raw[2]; 2376 inum = (inum << 32) | fid->raw[1]; 2377 2378 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 2379 shmem_match, fid->raw); 2380 if (inode) { 2381 dentry = d_find_alias(inode); 2382 iput(inode); 2383 } 2384 2385 return dentry; 2386 } 2387 2388 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 2389 struct inode 
*parent) 2390 { 2391 if (*len < 3) { 2392 *len = 3; 2393 return FILEID_INVALID; 2394 } 2395 2396 if (inode_unhashed(inode)) { 2397 /* Unfortunately insert_inode_hash is not idempotent, 2398 * so as we hash inodes here rather than at creation 2399 * time, we need a lock to ensure we only try 2400 * to do it once 2401 */ 2402 static DEFINE_SPINLOCK(lock); 2403 spin_lock(&lock); 2404 if (inode_unhashed(inode)) 2405 __insert_inode_hash(inode, 2406 inode->i_ino + inode->i_generation); 2407 spin_unlock(&lock); 2408 } 2409 2410 fh[0] = inode->i_generation; 2411 fh[1] = inode->i_ino; 2412 fh[2] = ((__u64)inode->i_ino) >> 32; 2413 2414 *len = 3; 2415 return 1; 2416 } 2417 2418 static const struct export_operations shmem_export_ops = { 2419 .get_parent = shmem_get_parent, 2420 .encode_fh = shmem_encode_fh, 2421 .fh_to_dentry = shmem_fh_to_dentry, 2422 }; 2423 2424 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 2425 bool remount) 2426 { 2427 char *this_char, *value, *rest; 2428 struct mempolicy *mpol = NULL; 2429 uid_t uid; 2430 gid_t gid; 2431 2432 while (options != NULL) { 2433 this_char = options; 2434 for (;;) { 2435 /* 2436 * NUL-terminate this option: unfortunately, 2437 * mount options form a comma-separated list, 2438 * but mpol's nodelist may also contain commas. 2439 */ 2440 options = strchr(options, ','); 2441 if (options == NULL) 2442 break; 2443 options++; 2444 if (!isdigit(*options)) { 2445 options[-1] = '\0'; 2446 break; 2447 } 2448 } 2449 if (!*this_char) 2450 continue; 2451 if ((value = strchr(this_char,'=')) != NULL) { 2452 *value++ = 0; 2453 } else { 2454 printk(KERN_ERR 2455 "tmpfs: No value for mount option '%s'\n", 2456 this_char); 2457 goto error; 2458 } 2459 2460 if (!strcmp(this_char,"size")) { 2461 unsigned long long size; 2462 size = memparse(value,&rest); 2463 if (*rest == '%') { 2464 size <<= PAGE_SHIFT; 2465 size *= totalram_pages; 2466 do_div(size, 100); 2467 rest++; 2468 } 2469 if (*rest) 2470 goto bad_val; 2471 sbinfo->max_blocks = 2472 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2473 } else if (!strcmp(this_char,"nr_blocks")) { 2474 sbinfo->max_blocks = memparse(value, &rest); 2475 if (*rest) 2476 goto bad_val; 2477 } else if (!strcmp(this_char,"nr_inodes")) { 2478 sbinfo->max_inodes = memparse(value, &rest); 2479 if (*rest) 2480 goto bad_val; 2481 } else if (!strcmp(this_char,"mode")) { 2482 if (remount) 2483 continue; 2484 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 2485 if (*rest) 2486 goto bad_val; 2487 } else if (!strcmp(this_char,"uid")) { 2488 if (remount) 2489 continue; 2490 uid = simple_strtoul(value, &rest, 0); 2491 if (*rest) 2492 goto bad_val; 2493 sbinfo->uid = make_kuid(current_user_ns(), uid); 2494 if (!uid_valid(sbinfo->uid)) 2495 goto bad_val; 2496 } else if (!strcmp(this_char,"gid")) { 2497 if (remount) 2498 continue; 2499 gid = simple_strtoul(value, &rest, 0); 2500 if (*rest) 2501 goto bad_val; 2502 sbinfo->gid = make_kgid(current_user_ns(), gid); 2503 if (!gid_valid(sbinfo->gid)) 2504 goto bad_val; 2505 } else if (!strcmp(this_char,"mpol")) { 2506 mpol_put(mpol); 2507 mpol = NULL; 2508 if (mpol_parse_str(value, &mpol)) 2509 goto bad_val; 2510 } else { 2511 printk(KERN_ERR "tmpfs: Bad mount option %s\n", 2512 this_char); 2513 goto error; 2514 } 2515 } 2516 sbinfo->mpol = mpol; 2517 return 0; 2518 2519 bad_val: 2520 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", 2521 value, this_char); 2522 error: 2523 mpol_put(mpol); 2524 return 1; 2525 2526 } 2527 2528 static int shmem_remount_fs(struct 
super_block *sb, int *flags, char *data) 2529 { 2530 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2531 struct shmem_sb_info config = *sbinfo; 2532 unsigned long inodes; 2533 int error = -EINVAL; 2534 2535 config.mpol = NULL; 2536 if (shmem_parse_options(data, &config, true)) 2537 return error; 2538 2539 spin_lock(&sbinfo->stat_lock); 2540 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 2541 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 2542 goto out; 2543 if (config.max_inodes < inodes) 2544 goto out; 2545 /* 2546 * Those tests disallow limited->unlimited while any are in use; 2547 * but we must separately disallow unlimited->limited, because 2548 * in that case we have no record of how much is already in use. 2549 */ 2550 if (config.max_blocks && !sbinfo->max_blocks) 2551 goto out; 2552 if (config.max_inodes && !sbinfo->max_inodes) 2553 goto out; 2554 2555 error = 0; 2556 sbinfo->max_blocks = config.max_blocks; 2557 sbinfo->max_inodes = config.max_inodes; 2558 sbinfo->free_inodes = config.max_inodes - inodes; 2559 2560 /* 2561 * Preserve previous mempolicy unless mpol remount option was specified. 2562 */ 2563 if (config.mpol) { 2564 mpol_put(sbinfo->mpol); 2565 sbinfo->mpol = config.mpol; /* transfers initial ref */ 2566 } 2567 out: 2568 spin_unlock(&sbinfo->stat_lock); 2569 return error; 2570 } 2571 2572 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 2573 { 2574 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 2575 2576 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2577 seq_printf(seq, ",size=%luk", 2578 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2579 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2580 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2581 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2582 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 2583 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 2584 seq_printf(seq, ",uid=%u", 2585 from_kuid_munged(&init_user_ns, sbinfo->uid)); 2586 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 2587 seq_printf(seq, ",gid=%u", 2588 from_kgid_munged(&init_user_ns, sbinfo->gid)); 2589 shmem_show_mpol(seq, sbinfo->mpol); 2590 return 0; 2591 } 2592 #endif /* CONFIG_TMPFS */ 2593 2594 static void shmem_put_super(struct super_block *sb) 2595 { 2596 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2597 2598 percpu_counter_destroy(&sbinfo->used_blocks); 2599 mpol_put(sbinfo->mpol); 2600 kfree(sbinfo); 2601 sb->s_fs_info = NULL; 2602 } 2603 2604 int shmem_fill_super(struct super_block *sb, void *data, int silent) 2605 { 2606 struct inode *inode; 2607 struct shmem_sb_info *sbinfo; 2608 int err = -ENOMEM; 2609 2610 /* Round up to L1_CACHE_BYTES to resist false sharing */ 2611 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 2612 L1_CACHE_BYTES), GFP_KERNEL); 2613 if (!sbinfo) 2614 return -ENOMEM; 2615 2616 sbinfo->mode = S_IRWXUGO | S_ISVTX; 2617 sbinfo->uid = current_fsuid(); 2618 sbinfo->gid = current_fsgid(); 2619 sb->s_fs_info = sbinfo; 2620 2621 #ifdef CONFIG_TMPFS 2622 /* 2623 * Per default we only allow half of the physical ram per 2624 * tmpfs instance, limiting inodes to one per page of lowmem; 2625 * but the internal instance is left unlimited. 
2626 */ 2627 if (!(sb->s_flags & MS_KERNMOUNT)) { 2628 sbinfo->max_blocks = shmem_default_max_blocks(); 2629 sbinfo->max_inodes = shmem_default_max_inodes(); 2630 if (shmem_parse_options(data, sbinfo, false)) { 2631 err = -EINVAL; 2632 goto failed; 2633 } 2634 } else { 2635 sb->s_flags |= MS_NOUSER; 2636 } 2637 sb->s_export_op = &shmem_export_ops; 2638 sb->s_flags |= MS_NOSEC; 2639 #else 2640 sb->s_flags |= MS_NOUSER; 2641 #endif 2642 2643 spin_lock_init(&sbinfo->stat_lock); 2644 if (percpu_counter_init(&sbinfo->used_blocks, 0)) 2645 goto failed; 2646 sbinfo->free_inodes = sbinfo->max_inodes; 2647 2648 sb->s_maxbytes = MAX_LFS_FILESIZE; 2649 sb->s_blocksize = PAGE_CACHE_SIZE; 2650 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 2651 sb->s_magic = TMPFS_MAGIC; 2652 sb->s_op = &shmem_ops; 2653 sb->s_time_gran = 1; 2654 #ifdef CONFIG_TMPFS_XATTR 2655 sb->s_xattr = shmem_xattr_handlers; 2656 #endif 2657 #ifdef CONFIG_TMPFS_POSIX_ACL 2658 sb->s_flags |= MS_POSIXACL; 2659 #endif 2660 2661 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 2662 if (!inode) 2663 goto failed; 2664 inode->i_uid = sbinfo->uid; 2665 inode->i_gid = sbinfo->gid; 2666 sb->s_root = d_make_root(inode); 2667 if (!sb->s_root) 2668 goto failed; 2669 return 0; 2670 2671 failed: 2672 shmem_put_super(sb); 2673 return err; 2674 } 2675 2676 static struct kmem_cache *shmem_inode_cachep; 2677 2678 static struct inode *shmem_alloc_inode(struct super_block *sb) 2679 { 2680 struct shmem_inode_info *info; 2681 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 2682 if (!info) 2683 return NULL; 2684 return &info->vfs_inode; 2685 } 2686 2687 static void shmem_destroy_callback(struct rcu_head *head) 2688 { 2689 struct inode *inode = container_of(head, struct inode, i_rcu); 2690 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 2691 } 2692 2693 static void shmem_destroy_inode(struct inode *inode) 2694 { 2695 if (S_ISREG(inode->i_mode)) 2696 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 2697 call_rcu(&inode->i_rcu, shmem_destroy_callback); 2698 } 2699 2700 static void shmem_init_inode(void *foo) 2701 { 2702 struct shmem_inode_info *info = foo; 2703 inode_init_once(&info->vfs_inode); 2704 } 2705 2706 static int shmem_init_inodecache(void) 2707 { 2708 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 2709 sizeof(struct shmem_inode_info), 2710 0, SLAB_PANIC, shmem_init_inode); 2711 return 0; 2712 } 2713 2714 static void shmem_destroy_inodecache(void) 2715 { 2716 kmem_cache_destroy(shmem_inode_cachep); 2717 } 2718 2719 static const struct address_space_operations shmem_aops = { 2720 .writepage = shmem_writepage, 2721 .set_page_dirty = __set_page_dirty_no_writeback, 2722 #ifdef CONFIG_TMPFS 2723 .write_begin = shmem_write_begin, 2724 .write_end = shmem_write_end, 2725 #endif 2726 .migratepage = migrate_page, 2727 .error_remove_page = generic_error_remove_page, 2728 }; 2729 2730 static const struct file_operations shmem_file_operations = { 2731 .mmap = shmem_mmap, 2732 #ifdef CONFIG_TMPFS 2733 .llseek = shmem_file_llseek, 2734 .read = new_sync_read, 2735 .write = new_sync_write, 2736 .read_iter = shmem_file_read_iter, 2737 .write_iter = generic_file_write_iter, 2738 .fsync = noop_fsync, 2739 .splice_read = shmem_file_splice_read, 2740 .splice_write = iter_file_splice_write, 2741 .fallocate = shmem_fallocate, 2742 #endif 2743 }; 2744 2745 static const struct inode_operations shmem_inode_operations = { 2746 .setattr = shmem_setattr, 2747 #ifdef CONFIG_TMPFS_XATTR 2748 .setxattr = shmem_setxattr, 2749 
.getxattr = shmem_getxattr, 2750 .listxattr = shmem_listxattr, 2751 .removexattr = shmem_removexattr, 2752 .set_acl = simple_set_acl, 2753 #endif 2754 }; 2755 2756 static const struct inode_operations shmem_dir_inode_operations = { 2757 #ifdef CONFIG_TMPFS 2758 .create = shmem_create, 2759 .lookup = simple_lookup, 2760 .link = shmem_link, 2761 .unlink = shmem_unlink, 2762 .symlink = shmem_symlink, 2763 .mkdir = shmem_mkdir, 2764 .rmdir = shmem_rmdir, 2765 .mknod = shmem_mknod, 2766 .rename = shmem_rename, 2767 .tmpfile = shmem_tmpfile, 2768 #endif 2769 #ifdef CONFIG_TMPFS_XATTR 2770 .setxattr = shmem_setxattr, 2771 .getxattr = shmem_getxattr, 2772 .listxattr = shmem_listxattr, 2773 .removexattr = shmem_removexattr, 2774 #endif 2775 #ifdef CONFIG_TMPFS_POSIX_ACL 2776 .setattr = shmem_setattr, 2777 .set_acl = simple_set_acl, 2778 #endif 2779 }; 2780 2781 static const struct inode_operations shmem_special_inode_operations = { 2782 #ifdef CONFIG_TMPFS_XATTR 2783 .setxattr = shmem_setxattr, 2784 .getxattr = shmem_getxattr, 2785 .listxattr = shmem_listxattr, 2786 .removexattr = shmem_removexattr, 2787 #endif 2788 #ifdef CONFIG_TMPFS_POSIX_ACL 2789 .setattr = shmem_setattr, 2790 .set_acl = simple_set_acl, 2791 #endif 2792 }; 2793 2794 static const struct super_operations shmem_ops = { 2795 .alloc_inode = shmem_alloc_inode, 2796 .destroy_inode = shmem_destroy_inode, 2797 #ifdef CONFIG_TMPFS 2798 .statfs = shmem_statfs, 2799 .remount_fs = shmem_remount_fs, 2800 .show_options = shmem_show_options, 2801 #endif 2802 .evict_inode = shmem_evict_inode, 2803 .drop_inode = generic_delete_inode, 2804 .put_super = shmem_put_super, 2805 }; 2806 2807 static const struct vm_operations_struct shmem_vm_ops = { 2808 .fault = shmem_fault, 2809 .map_pages = filemap_map_pages, 2810 #ifdef CONFIG_NUMA 2811 .set_policy = shmem_set_policy, 2812 .get_policy = shmem_get_policy, 2813 #endif 2814 .remap_pages = generic_file_remap_pages, 2815 }; 2816 2817 static struct dentry *shmem_mount(struct file_system_type *fs_type, 2818 int flags, const char *dev_name, void *data) 2819 { 2820 return mount_nodev(fs_type, flags, data, shmem_fill_super); 2821 } 2822 2823 static struct file_system_type shmem_fs_type = { 2824 .owner = THIS_MODULE, 2825 .name = "tmpfs", 2826 .mount = shmem_mount, 2827 .kill_sb = kill_litter_super, 2828 .fs_flags = FS_USERNS_MOUNT, 2829 }; 2830 2831 int __init shmem_init(void) 2832 { 2833 int error; 2834 2835 /* If rootfs called this, don't re-init */ 2836 if (shmem_inode_cachep) 2837 return 0; 2838 2839 error = bdi_init(&shmem_backing_dev_info); 2840 if (error) 2841 goto out4; 2842 2843 error = shmem_init_inodecache(); 2844 if (error) 2845 goto out3; 2846 2847 error = register_filesystem(&shmem_fs_type); 2848 if (error) { 2849 printk(KERN_ERR "Could not register tmpfs\n"); 2850 goto out2; 2851 } 2852 2853 shm_mnt = kern_mount(&shmem_fs_type); 2854 if (IS_ERR(shm_mnt)) { 2855 error = PTR_ERR(shm_mnt); 2856 printk(KERN_ERR "Could not kern_mount tmpfs\n"); 2857 goto out1; 2858 } 2859 return 0; 2860 2861 out1: 2862 unregister_filesystem(&shmem_fs_type); 2863 out2: 2864 shmem_destroy_inodecache(); 2865 out3: 2866 bdi_destroy(&shmem_backing_dev_info); 2867 out4: 2868 shm_mnt = ERR_PTR(error); 2869 return error; 2870 } 2871 2872 #else /* !CONFIG_SHMEM */ 2873 2874 /* 2875 * tiny-shmem: simple shmemfs and tmpfs using ramfs code 2876 * 2877 * This is intended for small system where the benefits of the full 2878 * shmem code (swap-backed and resource-limited) are outweighed by 2879 * their complexity. 
On systems without swap this code should be 2880 * effectively equivalent, but much lighter weight. 2881 */ 2882 2883 static struct file_system_type shmem_fs_type = { 2884 .name = "tmpfs", 2885 .mount = ramfs_mount, 2886 .kill_sb = kill_litter_super, 2887 .fs_flags = FS_USERNS_MOUNT, 2888 }; 2889 2890 int __init shmem_init(void) 2891 { 2892 BUG_ON(register_filesystem(&shmem_fs_type) != 0); 2893 2894 shm_mnt = kern_mount(&shmem_fs_type); 2895 BUG_ON(IS_ERR(shm_mnt)); 2896 2897 return 0; 2898 } 2899 2900 int shmem_unuse(swp_entry_t swap, struct page *page) 2901 { 2902 return 0; 2903 } 2904 2905 int shmem_lock(struct file *file, int lock, struct user_struct *user) 2906 { 2907 return 0; 2908 } 2909 2910 void shmem_unlock_mapping(struct address_space *mapping) 2911 { 2912 } 2913 2914 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 2915 { 2916 truncate_inode_pages_range(inode->i_mapping, lstart, lend); 2917 } 2918 EXPORT_SYMBOL_GPL(shmem_truncate_range); 2919 2920 #define shmem_vm_ops generic_file_vm_ops 2921 #define shmem_file_operations ramfs_file_operations 2922 #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 2923 #define shmem_acct_size(flags, size) 0 2924 #define shmem_unacct_size(flags, size) do {} while (0) 2925 2926 #endif /* CONFIG_SHMEM */ 2927 2928 /* common code */ 2929 2930 static struct dentry_operations anon_ops = { 2931 .d_dname = simple_dname 2932 }; 2933 2934 static struct file *__shmem_file_setup(const char *name, loff_t size, 2935 unsigned long flags, unsigned int i_flags) 2936 { 2937 struct file *res; 2938 struct inode *inode; 2939 struct path path; 2940 struct super_block *sb; 2941 struct qstr this; 2942 2943 if (IS_ERR(shm_mnt)) 2944 return ERR_CAST(shm_mnt); 2945 2946 if (size < 0 || size > MAX_LFS_FILESIZE) 2947 return ERR_PTR(-EINVAL); 2948 2949 if (shmem_acct_size(flags, size)) 2950 return ERR_PTR(-ENOMEM); 2951 2952 res = ERR_PTR(-ENOMEM); 2953 this.name = name; 2954 this.len = strlen(name); 2955 this.hash = 0; /* will go */ 2956 sb = shm_mnt->mnt_sb; 2957 path.mnt = mntget(shm_mnt); 2958 path.dentry = d_alloc_pseudo(sb, &this); 2959 if (!path.dentry) 2960 goto put_memory; 2961 d_set_d_op(path.dentry, &anon_ops); 2962 2963 res = ERR_PTR(-ENOSPC); 2964 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 2965 if (!inode) 2966 goto put_memory; 2967 2968 inode->i_flags |= i_flags; 2969 d_instantiate(path.dentry, inode); 2970 inode->i_size = size; 2971 clear_nlink(inode); /* It is unlinked */ 2972 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 2973 if (IS_ERR(res)) 2974 goto put_path; 2975 2976 res = alloc_file(&path, FMODE_WRITE | FMODE_READ, 2977 &shmem_file_operations); 2978 if (IS_ERR(res)) 2979 goto put_path; 2980 2981 return res; 2982 2983 put_memory: 2984 shmem_unacct_size(flags, size); 2985 put_path: 2986 path_put(&path); 2987 return res; 2988 } 2989 2990 /** 2991 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 2992 * kernel internal. There will be NO LSM permission checks against the 2993 * underlying inode. So users of this interface must do LSM checks at a 2994 * higher layer. The one user is the big_key implementation. LSM checks 2995 * are provided at the key level rather than the inode level. 
2996 * @name: name for dentry (to be seen in /proc/<pid>/maps 2997 * @size: size to be set for the file 2998 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 2999 */ 3000 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) 3001 { 3002 return __shmem_file_setup(name, size, flags, S_PRIVATE); 3003 } 3004 3005 /** 3006 * shmem_file_setup - get an unlinked file living in tmpfs 3007 * @name: name for dentry (to be seen in /proc/<pid>/maps 3008 * @size: size to be set for the file 3009 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 3010 */ 3011 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 3012 { 3013 return __shmem_file_setup(name, size, flags, 0); 3014 } 3015 EXPORT_SYMBOL_GPL(shmem_file_setup); 3016 3017 /** 3018 * shmem_zero_setup - setup a shared anonymous mapping 3019 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff 3020 */ 3021 int shmem_zero_setup(struct vm_area_struct *vma) 3022 { 3023 struct file *file; 3024 loff_t size = vma->vm_end - vma->vm_start; 3025 3026 file = shmem_file_setup("dev/zero", size, vma->vm_flags); 3027 if (IS_ERR(file)) 3028 return PTR_ERR(file); 3029 3030 if (vma->vm_file) 3031 fput(vma->vm_file); 3032 vma->vm_file = file; 3033 vma->vm_ops = &shmem_vm_ops; 3034 return 0; 3035 } 3036 3037 /** 3038 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 3039 * @mapping: the page's address_space 3040 * @index: the page index 3041 * @gfp: the page allocator flags to use if allocating 3042 * 3043 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 3044 * with any new page allocations done using the specified allocation flags. 3045 * But read_cache_page_gfp() uses the ->readpage() method: which does not 3046 * suit tmpfs, since it may have pages in swapcache, and needs to find those 3047 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 3048 * 3049 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 3050 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 3051 */ 3052 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 3053 pgoff_t index, gfp_t gfp) 3054 { 3055 #ifdef CONFIG_SHMEM 3056 struct inode *inode = mapping->host; 3057 struct page *page; 3058 int error; 3059 3060 BUG_ON(mapping->a_ops != &shmem_aops); 3061 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); 3062 if (error) 3063 page = ERR_PTR(error); 3064 else 3065 unlock_page(page); 3066 return page; 3067 #else 3068 /* 3069 * The tiny !SHMEM case uses ramfs without swap 3070 */ 3071 return read_cache_page_gfp(mapping, index, gfp); 3072 #endif 3073 } 3074 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 3075
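/*
 * Illustrative userspace sketch (not part of this file's build): a minimal
 * example of the behaviour implemented above by shmem_file_llseek()
 * (lseek with SEEK_HOLE/SEEK_DATA) and shmem_fallocate() (preallocation and
 * FALLOC_FL_PUNCH_HOLE).  The tmpfs path "/dev/shm/seek_demo", the 1MiB file
 * size and the offsets noted below are assumptions made for the example
 * (they presume 4K pages and a libc new enough to expose SEEK_HOLE,
 * SEEK_DATA and FALLOC_FL_* under _GNU_SOURCE), not anything mandated by
 * this code.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *path = "/dev/shm/seek_demo";  // assumed tmpfs mount
 *		char buf[4096];
 *		int fd, i;
 *
 *		fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
 *		if (fd < 0)
 *			return 1;
 *
 *		memset(buf, 'x', sizeof(buf));
 *		for (i = 0; i < 256; i++)		// write 1MiB of data
 *			if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
 *				return 1;
 *
 *		// punch a hole over [256K, 512K); KEEP_SIZE leaves i_size alone
 *		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			  256 << 10, 256 << 10);
 *
 *		// with 4K pages, expected: 262144 (start of the punched range)
 *		printf("first hole at %lld\n",
 *		       (long long)lseek(fd, 0, SEEK_HOLE));
 *		// expected: 524288 (first data page after the hole)
 *		printf("data after hole at %lld\n",
 *		       (long long)lseek(fd, 256 << 10, SEEK_DATA));
 *
 *		close(fd);
 *		unlink(path);
 *		return 0;
 *	}
 *
 * Something like "gcc -O2 -o seek_demo seek_demo.c" should build it; run it
 * against any tmpfs mount and vary the punched range to watch the reported
 * hole/data offsets move.
 */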