/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/mman.h>

static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs);

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock		(vmtruncate)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *          ->zone.lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_file_buffered_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  ->i_mutex
 *    ->i_alloc_sem		(various)
 *
 *  ->inode_lock
 *    ->sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock		(proc_pid_lookup)
 */
/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	BUG_ON(page_mapped(page));
}

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
}
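/*
 * Example (editor's illustrative sketch, not code from this file): a
 * caller such as a truncate path is expected to lock the page and hold
 * a reference before removing it; "page" here is assumed to be a
 * pagecache page the caller already found and pinned:
 *
 *	lock_page(page);
 *	if (page->mapping)
 *		remove_from_page_cache(page);
 *	unlock_page(page);
 *	page_cache_release(page);
 */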
static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((unsigned long *)word, struct page, flags);

	/*
	 * page_mapping() is being called without PG_locked held.
	 * Some knowledge of the state and use of the page is used to
	 * reduce the requirements down to a memory barrier.
	 * The danger here is of a stale page_mapping() return value
	 * indicating a struct address_space different from the one it's
	 * associated with when it is associated with one.
	 * After smp_mb(), it's either the correct page_mapping() for
	 * the page, or an old page_mapping() and the page's own
	 * page_mapping() has gone NULL.
	 * The ->sync_page() address_space operation must tolerate
	 * page_mapping() going NULL. By an amazing coincidence,
	 * this comes about because none of the users of the page
	 * in the ->sync_page() methods make essential use of the
	 * page_mapping(), merely passing the page down to the backing
	 * device's unplug functions when it's non-NULL, which in turn
	 * ignore it for all cases but swap, where only page_private(page) is
	 * of interest. When page_mapping() does go NULL, the entire
	 * call stack gracefully ignores the page and returns.
	 * -- wli
	 */
	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);
	io_schedule();
	return 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping:	target address_space
 * @start:	beginning page index
 * @end:	ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}
/**
 * sync_page_range - write and wait on all pages in the passed range
 * @inode:	target inode
 * @mapping:	target address_space
 * @pos:	beginning offset in bytes to write
 * @count:	number of bytes to write
 *
 * Write and wait upon all the pages in the passed range.  This is a "data
 * integrity" operation.  It waits upon in-flight writeout before starting and
 * waiting upon new writeout.  If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0) {
		mutex_lock(&inode->i_mutex);
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		mutex_unlock(&inode->i_mutex);
	}
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range);

/**
 * sync_page_range_nolock - write & wait on all pages in the passed range without locking
 * @inode:	target inode
 * @mapping:	target address_space
 * @pos:	beginning offset in bytes to write
 * @count:	number of bytes to write
 *
 * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			   loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
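/*
 * Example (editor's illustrative sketch, not code from this file): an
 * fsync-style data-integrity path would typically flush a whole mapping
 * with the helper above; "inode" here is assumed to be the caller's
 * inode:
 *
 *	int err = filemap_write_and_wait(inode->i_mapping);
 *	if (err)
 *		return err;
 */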
/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
			if (!err)
				err = err2;
		}
	}
	return err;
}

/**
 * add_to_page_cache - add newly allocated pagecache pages
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add newly allocated pagecache pages;
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			page->mapping = mapping;
			page->index = offset;
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
		}
		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
EXPORT_SYMBOL(add_to_page_cache);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add(page);
	return ret;
}
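/*
 * Example (editor's illustrative sketch): the usual way a new page
 * enters the page cache is allocate, insert, then start I/O - the same
 * shape as page_cache_read() further down this file:
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
 *	if (ret == 0)
 *		ret = mapping->a_ops->readpage(file, page);
 *	page_cache_release(page);
 */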
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

static int __sleep_on_page_lock(void *word)
{
	io_schedule();
	return 0;
}

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 *
 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/*
 * Variant of lock_page that does not require the caller to hold a reference
 * on the page's mapping.
 */
void fastcall __lock_page_nosync(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
							TASK_UNINTERRUPTIBLE);
}
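/*
 * Example (editor's illustrative sketch): callers normally reach
 * __lock_page() through the lock_page() wrapper in linux/pagemap.h and
 * pair it with unlock_page():
 *
 *	lock_page(page);
 *	... inspect or modify the locked page ...
 *	unlock_page(page);
 */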
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */
struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_get_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns %NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
				pgoff_t offset)
{
	struct page *page;

repeat:
	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		page_cache_get(page);
		if (TestSetPageLocked(page)) {
			read_unlock_irq(&mapping->tree_lock);
			__lock_page(page);

			/* Has the page been truncated while we slept? */
			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				page_cache_release(page);
				goto repeat;
			}
			VM_BUG_ON(page->index != offset);
			goto out;
		}
	}
	read_unlock_irq(&mapping->tree_lock);
out:
	return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or %NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
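/*
 * Example (editor's illustrative sketch): every successful lookup above
 * returns the page with an elevated refcount, so callers must balance
 * it with page_cache_release():
 *
 *	struct page *page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the page ...
 *		page_cache_release(page);
 *	}
 */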
/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, start, nr_pages);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, index, nr_pages);
	for (i = 0; i < ret; i++) {
		if (pages[i]->mapping == NULL || pages[i]->index != index)
			break;

		page_cache_get(pages[i]);
		index++;
	}
	read_unlock_irq(&mapping->tree_lock);
	return i;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
				(void **)pages, *index, nr_pages, tag);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
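/*
 * Example (editor's illustrative sketch): gang lookups are normally
 * consumed in a loop that drops the references in bulk, e.g. via the
 * pagevec wrapper used by wait_on_page_writeback_range() above:
 *
 *	pgoff_t index = start;
 *	unsigned nr;
 *
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *				PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
 *		... process pvec.pages[0..nr-1] ...
 *		pagevec_release(&pvec);
 *	}
 */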
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		if (!TestSetPageLocked(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block (assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	if (!ra->ra_pages)
		return;

	ra->ra_pages /= 4;
}
/**
 * do_generic_mapping_read - generic file read routine
 * @mapping:	address_space to be read
 * @ra:		file's readahead state
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 * @actor:	read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.
 * It may be NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
			     struct file_ra_state *ra,
			     struct file *filp,
			     loff_t *ppos,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	struct inode *inode = mapping->host;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page))
			goto page_not_up_to_date;
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		lock_page(page);

		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			lock_page(page);
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_inode_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				error = -EIO;
				shrink_readahead_size_eio(filp, ra);
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}
EXPORT_SYMBOL(do_generic_mapping_read);

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * Performs necessary checks before doing a write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
			if (retval > 0)
				*ppos = pos + retval;
		}
		if (likely(retval != 0)) {
			file_accessed(filp);
			goto out;
		}
	}

	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp, ppos, &desc, file_read_actor);
			retval += desc.written;
			if (desc.error) {
				retval = retval ?: desc.error;
				break;
			}
			if (desc.count > 0)
				break;
		}
	}
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index,
					max_sane_readahead(nr));
	return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}
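/*
 * Example (editor's illustrative sketch): sys_readahead() above is what
 * userspace reaches through the readahead(2) wrapper, e.g.:
 *
 *	ssize_t r = readahead(fd, 0, 1024 * 1024);
 *
 * which asks the kernel to populate the page cache for the first 1MB of
 * the file without blocking on the data itself.
 */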
#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int fastcall page_cache_read(struct file * file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size;
	int did_readaround = 0;
	int ret = 0;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		goto outside_data_content;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		goto no_cached_page;

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_lock_page(mapping, vmf->pgoff);
	/*
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(vma)) {
		if (!page) {
			page_cache_sync_readahead(mapping, ra, file,
							   vmf->pgoff, 1);
			page = find_lock_page(mapping, vmf->pgoff);
			if (!page)
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping, ra, file, page,
							   vmf->pgoff, 1);
		}
	}

	if (!page) {
		unsigned long ra_pages;

		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			ret = VM_FAULT_MAJOR;
			count_vm_event(PGMAJFAULT);
		}
		did_readaround = 1;
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (vmf->pgoff > ra_pages / 2)
				start = vmf->pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_lock_page(mapping, vmf->pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_miss--;
	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/* Must recheck i_size under page lock */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		unlock_page(page);
		page_cache_release(page);
		goto outside_data_content;
	}

	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

outside_data_content:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	if (vma->vm_mm == current->mm)
		return VM_FAULT_SIGBUS;

	/* Fall through to the non-read-ahead case */
no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, vmf->pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/* IO error path */
	if (!did_readaround) {
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

struct vm_operations_struct generic_file_vm_ops = {
	.fault = filemap_fault,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
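/*
 * Example (editor's illustrative sketch, "myfs" is a made-up name): a
 * filesystem picks up this mmap support simply by pointing its
 * file_operations at the helper above:
 *
 *	const struct file_operations myfs_file_operations = {
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		.mmap		= generic_file_mmap,
 *	};
 */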
/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	return page;
}

/*
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 */
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	}
out:
	mark_page_accessed(page);
	return page;
}
EXPORT_SYMBOL(read_cache_page_async);
/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	destination for read data
 *
 * Read into the page cache.  If a page already exists, and PageUptodate() is
 * not set, try to fill the page then wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	struct page *page;

	page = read_cache_page_async(mapping, index, filler, data);
	if (IS_ERR(page))
		goto out;
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		page_cache_release(page);
		page = ERR_PTR(-EIO);
	}
out:
	return page;
}
EXPORT_SYMBOL(read_cache_page);
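/*
 * Example (editor's illustrative sketch): callers commonly use the
 * mapping's own ->readpage as the filler, with the file as its opaque
 * data argument, and must check for an ERR_PTR result:
 *
 *	page = read_cache_page(mapping, n,
 *			(filler_t *)mapping->a_ops->readpage, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */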
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	return notify_change(dentry, &newattrs);
}

int remove_suid(struct dentry *dentry)
{
	int kill = should_remove_suid(dentry);

	if (unlikely(kill))
		return __remove_suid(dentry, kill);

	return 0;
}
EXPORT_SYMBOL(remove_suid);

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page, KM_USER0);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic_nocache(kaddr + offset,
							buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr, KM_USER0);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

/*
 * This has the same side effects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
{
	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;

		while (bytes) {
			int copy = min(bytes, iov->iov_len - base);

			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
	}
}

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	__iov_iter_advance_iov(i, bytes);
	i->count -= bytes;
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
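/*
 * Example (editor's illustrative sketch, assuming the iov_iter_init()
 * helper from linux/fs.h): a buffered-write loop drives these iterator
 * helpers roughly like generic_perform_write() below does:
 *
 *	struct iov_iter i;
 *
 *	iov_iter_init(&i, iov, nr_segs, count, 0);
 *	while (iov_iter_count(&i)) {
 *		... pick page, offset and bytes for this iteration ...
 *		copied = iov_iter_copy_from_user_atomic(page, &i,
 *							offset, bytes);
 *		iov_iter_advance(&i, copied);
 *	}
 */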
/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus' frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
#ifdef CONFIG_BLOCK
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
#else
		return -EPERM;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);
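/*
 * Example (editor's illustrative sketch): a write path calls this
 * before doing any I/O, letting it clamp the byte count or reject the
 * write outright:
 *
 *	err = generic_write_checks(file, &pos, &count,
 *				   S_ISBLK(inode->i_mode));
 *	if (err)
 *		return err;
 *	if (count == 0)
 *		return 0;
 */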
int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	if (aops->write_begin) {
		return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
	} else {
		int ret;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		struct inode *inode = mapping->host;
		struct page *page;
again:
		page = __grab_cache_page(mapping, index);
		*pagep = page;
		if (!page)
			return -ENOMEM;

		if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
			/*
			 * There is no way to resolve a short write situation
			 * for a !Uptodate page (except by double copying in
			 * the caller done by generic_perform_write_2copy).
			 *
			 * Instead, we have to bring it uptodate here.
			 */
			ret = aops->readpage(file, page);
			page_cache_release(page);
			if (ret) {
				if (ret == AOP_TRUNCATED_PAGE)
					goto again;
				return ret;
			}
			goto again;
		}

		ret = aops->prepare_write(file, page, offset, offset+len);
		if (ret) {
			unlock_page(page);
			page_cache_release(page);
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		}
		return ret;
	}
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;
	int ret;

	if (aops->write_end) {
		mark_page_accessed(page);
		ret = aops->write_end(file, mapping, pos, len, copied,
							page, fsdata);
	} else {
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		struct inode *inode = mapping->host;

		flush_dcache_page(page);
		ret = aops->commit_write(file, page, offset, offset+len);
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (ret < 0) {
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		} else if (ret > 0)
			ret = min_t(size_t, copied, ret);
		else
			ret = copied;
	}

	return ret;
}
EXPORT_SYMBOL(pagecache_write_end);
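/*
 * Example (editor's illustrative sketch): the two helpers above are
 * meant to bracket each copy into the page cache, the same shape used
 * by generic_perform_write(); error handling and pagefault disabling
 * are omitted here for brevity:
 *
 *	status = pagecache_write_begin(file, mapping, pos, bytes,
 *						flags, &page, &fsdata);
 *	if (status)
 *		break;
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	status = pagecache_write_end(file, mapping, pos, bytes, copied,
 *						page, fsdata);
 */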
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file	*file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode	*inode = mapping->host;
	ssize_t		written;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
	if (written > 0) {
		loff_t end = pos + written;
		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, end);
			mark_inode_dirty(inode);
		}
		*ppos = end;
	}

	/*
	 * Sync the fs metadata but not the minor inode changes and
	 * of course not the data as we did direct DMA for the IO.
	 * i_mutex is held, which protects generic_osync_inode() from
	 * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
	 */
	if ((written >= 0 || written == -EIOCBQUEUED) &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		if (err < 0)
			written = err;
	}
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
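/*
 * The return convention above is easy to misread; this hypothetical helper
 * spells out the three cases a synchronous caller must distinguish.
 */
static ssize_t example_classify_direct_write(ssize_t written, size_t count)
{
	if (written == -EIOCBQUEUED)
		return written;	/* async: bytes are reported at completion */
	if (written < 0)
		return written;	/* hard error, nothing was transferred */
	if ((size_t)written < count)
		return written;	/* short write (e.g. a hole): the caller may
				 * fall back to buffered I/O for the rest */
	return written;		/* the whole request went out via direct DMA */
}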
/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
{
	int status;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);
	if (likely(page))
		return page;

	page = page_cache_alloc(mapping);
	if (!page)
		return NULL;
	status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
	if (unlikely(status)) {
		page_cache_release(page);
		if (status == -EEXIST)
			goto repeat;
		return NULL;
	}
	return page;
}
EXPORT_SYMBOL(__grab_cache_page);

static ssize_t generic_perform_write_2copy(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *src_page;
		struct page *page;
		pgoff_t index;		/* Pagecache index for current page */
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

		/*
		 * a non-NULL src_page indicates that we're doing the
		 * copy via get_user_pages and kmap.
		 */
		src_page = NULL;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		page = __grab_cache_page(mapping, index);
		if (!page) {
			status = -ENOMEM;
			break;
		}

		/*
		 * non-uptodate pages cannot cope with short copies, and we
		 * cannot take a pagefault with the destination page locked.
		 * So pin the source page to copy it.
		 */
		if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
			unlock_page(page);

			src_page = alloc_page(GFP_KERNEL);
			if (!src_page) {
				page_cache_release(page);
				status = -ENOMEM;
				break;
			}

			/*
			 * Cannot get_user_pages with a page locked for the
			 * same reason as we can't take a page fault with a
			 * page locked (as explained below).
			 */
			copied = iov_iter_copy_from_user(src_page, i,
								offset, bytes);
			if (unlikely(copied == 0)) {
				status = -EFAULT;
				page_cache_release(page);
				page_cache_release(src_page);
				break;
			}
			bytes = copied;

			lock_page(page);
			/*
			 * Can't handle the page going uptodate here, because
			 * that means we would use non-atomic usercopies, which
			 * zero out the tail of the page, which can cause
			 * zeroes to become transiently visible. We could just
			 * use a non-zeroing copy, but the APIs aren't too
			 * consistent.
			 */
			if (unlikely(!page->mapping || PageUptodate(page))) {
				unlock_page(page);
				page_cache_release(page);
				page_cache_release(src_page);
				continue;
			}
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status))
			goto fs_write_aop_error;

		if (!src_page) {
			/*
			 * Must not enter the pagefault handler here, because
			 * we hold the page lock, so we might recursively
			 * deadlock on the same lock, or get an ABBA deadlock
			 * against a different lock, or against the mmap_sem
			 * (which nests outside the page lock).  So increment
			 * preempt count, and use _atomic usercopies.
			 *
			 * The page is uptodate so we are OK to encounter a
			 * short copy: if unmodified parts of the page are
			 * marked dirty and written out to disk, it doesn't
			 * really matter.
			 */
			pagefault_disable();
			copied = iov_iter_copy_from_user_atomic(page, i,
								offset, bytes);
			pagefault_enable();
		} else {
			void *src, *dst;
			src = kmap_atomic(src_page, KM_USER0);
			dst = kmap_atomic(page, KM_USER1);
			memcpy(dst + offset, src + offset, bytes);
			kunmap_atomic(dst, KM_USER1);
			kunmap_atomic(src, KM_USER0);
			copied = bytes;
		}
		flush_dcache_page(page);

		status = a_ops->commit_write(file, page, offset, offset+bytes);
		if (unlikely(status < 0))
			goto fs_write_aop_error;
		if (unlikely(status > 0)) /* filesystem did partial write */
			copied = min_t(size_t, copied, status);

		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		if (src_page)
			page_cache_release(src_page);

		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
		continue;

fs_write_aop_error:
		unlock_page(page);
		page_cache_release(page);
		if (src_page)
			page_cache_release(src_page);

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + bytes > inode->i_size)
			vmtruncate(inode, inode->i_size);
		break;
	} while (iov_iter_count(i));

	return written ? written : status;
}
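/*
 * The core trick of the copy loops above, in isolation: faults must never
 * be taken while the destination pagecache page is locked, so the usercopy
 * runs with pagefaults disabled and a short copy is tolerated.  A sketch;
 * the helper name example_locked_usercopy is hypothetical.
 */
static size_t example_locked_usercopy(struct page *page, struct iov_iter *i,
				      unsigned long offset, unsigned long bytes)
{
	size_t copied;

	pagefault_disable();	/* the copy may not sleep or fault in pages */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	pagefault_enable();

	return copied;		/* may be short; the caller must retry */
}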
2215 * 2216 * Not only is this an optimisation, but it is also required 2217 * to check that the address is actually valid, when atomic 2218 * usercopies are used, below. 2219 */ 2220 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2221 status = -EFAULT; 2222 break; 2223 } 2224 2225 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2226 &page, &fsdata); 2227 if (unlikely(status)) 2228 break; 2229 2230 pagefault_disable(); 2231 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2232 pagefault_enable(); 2233 flush_dcache_page(page); 2234 2235 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2236 page, fsdata); 2237 if (unlikely(status < 0)) 2238 break; 2239 copied = status; 2240 2241 cond_resched(); 2242 2243 if (unlikely(copied == 0)) { 2244 /* 2245 * If we were unable to copy any data at all, we must 2246 * fall back to a single segment length write. 2247 * 2248 * If we didn't fallback here, we could livelock 2249 * because not all segments in the iov can be copied at 2250 * once without a pagefault. 2251 */ 2252 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2253 iov_iter_single_seg_count(i)); 2254 goto again; 2255 } 2256 iov_iter_advance(i, copied); 2257 pos += copied; 2258 written += copied; 2259 2260 balance_dirty_pages_ratelimited(mapping); 2261 2262 } while (iov_iter_count(i)); 2263 2264 return written ? written : status; 2265 } 2266 2267 ssize_t 2268 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, 2269 unsigned long nr_segs, loff_t pos, loff_t *ppos, 2270 size_t count, ssize_t written) 2271 { 2272 struct file *file = iocb->ki_filp; 2273 struct address_space *mapping = file->f_mapping; 2274 const struct address_space_operations *a_ops = mapping->a_ops; 2275 struct inode *inode = mapping->host; 2276 ssize_t status; 2277 struct iov_iter i; 2278 2279 iov_iter_init(&i, iov, nr_segs, count, written); 2280 if (a_ops->write_begin) 2281 status = generic_perform_write(file, &i, pos); 2282 else 2283 status = generic_perform_write_2copy(file, &i, pos); 2284 2285 if (likely(status >= 0)) { 2286 written += status; 2287 *ppos = pos + status; 2288 2289 /* 2290 * For now, when the user asks for O_SYNC, we'll actually give 2291 * O_DSYNC 2292 */ 2293 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2294 if (!a_ops->writepage || !is_sync_kiocb(iocb)) 2295 status = generic_osync_inode(inode, mapping, 2296 OSYNC_METADATA|OSYNC_DATA); 2297 } 2298 } 2299 2300 /* 2301 * If we get here for O_DIRECT writes then we must have fallen through 2302 * to buffered writes (block instantiation inside i_size). So we sync 2303 * the file data here, to try to honour O_DIRECT expectations. 2304 */ 2305 if (unlikely(file->f_flags & O_DIRECT) && written) 2306 status = filemap_write_and_wait(mapping); 2307 2308 return written ? 
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, written);
	if (a_ops->write_begin)
		status = generic_perform_write(file, &i, pos);
	else
		status = generic_perform_write_2copy(file, &i, pos);

	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;

		/*
		 * For now, when the user asks for O_SYNC, we'll actually give
		 * O_DSYNC
		 */
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
			if (!a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(inode, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}

	/*
	 * If we get here for O_DIRECT writes then we must have fallen through
	 * to buffered writes (block instantiation inside i_size).  So we sync
	 * the file data here, to try to honour O_DIRECT expectations.
	 */
	if (unlikely(file->f_flags & O_DIRECT) && written)
		status = filemap_write_and_wait(mapping);

	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);

static ssize_t
__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode	*inode = mapping->host;
	loff_t		pos;
	ssize_t		written;
	ssize_t		err;

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	file_update_time(file);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		loff_t endbyte;
		ssize_t written_buffered;

		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
							ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
		written_buffered = generic_file_buffered_write(iocb, iov,
						nr_segs, pos, ppos, count,
						written);
		/*
		 * If generic_file_buffered_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + written_buffered - written - 1;
		err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
					    SYNC_FILE_RANGE_WAIT_BEFORE|
					    SYNC_FILE_RANGE_WRITE|
					    SYNC_FILE_RANGE_WAIT_AFTER);
		if (err == 0) {
			written = written_buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_file_buffered_write(iocb, iov, nr_segs,
				pos, ppos, count, written);
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
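/*
 * A sketch of the O_DIRECT fallback bookkeeping above (hypothetical helper
 * name).  At the point endbyte is computed, pos has already been advanced
 * past the direct-written prefix and written_buffered is the cumulative
 * total, so the buffered tail occupies [pos, endbyte] in bytes; the code
 * converts that to an inclusive pagecache index range for invalidation.
 */
static void example_fallback_range(loff_t pos, ssize_t written,
				   ssize_t written_buffered,
				   pgoff_t *first, pgoff_t *last)
{
	loff_t endbyte = pos + written_buffered - written - 1;

	*first = pos >> PAGE_CACHE_SHIFT;	/* first buffered page */
	*last = endbyte >> PAGE_CACHE_SHIFT;	/* last buffered page */
}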
ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
			&iocb->ki_pos);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range_nolock(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write_nolock);

ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
			&iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
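/*
 * How a filesystem typically consumes the entry points above: unless it
 * orders i_mutex itself, it wires up the locked variant.  A sketch; the
 * structure name example_file_operations is hypothetical, but this mirrors
 * the wiring used by simple disk filesystems.
 */
static const struct file_operations example_file_operations = {
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,	/* takes i_mutex */
	.mmap		= generic_file_mmap,
};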
 */
	if (rw == WRITE && mapping->nrpages) {
		int err = invalidate_inode_pages2_range(mapping,
					offset >> PAGE_CACHE_SHIFT, end);
		if (err && retval >= 0)
			retval = err;
	}
out:
	return retval;
}

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to try to release any data held against the
 * page (presumably at page->private).  If the release was successful,
 * return `1'.  Otherwise return zero.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
 *
 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
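/*
 * A minimal sketch of the contract try_to_release_page() expects from a
 * ->releasepage() implementation (the function below is hypothetical):
 * refuse when gfp_mask forbids the work required, otherwise tear down
 * page->private and report the page freeable.
 */
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (PagePrivate(page)) {
		if (!(gfp_mask & __GFP_WAIT))
			return 0;	/* freeing would block: refuse */
		/* tear down whatever the fs hung off page->private here */
		ClearPagePrivate(page);
		set_page_private(page, 0);
	}
	return 1;			/* no fs metadata left: freeable */
}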