1 /* 2 * linux/mm/filemap.c 3 * 4 * Copyright (C) 1994-1999 Linus Torvalds 5 */ 6 7 /* 8 * This file handles the generic file mmap semantics used by 9 * most "normal" filesystems (but you don't /have/ to use this: 10 * the NFS filesystem used to do this differently, for example) 11 */ 12 #include <linux/config.h> 13 #include <linux/module.h> 14 #include <linux/slab.h> 15 #include <linux/compiler.h> 16 #include <linux/fs.h> 17 #include <linux/uaccess.h> 18 #include <linux/aio.h> 19 #include <linux/capability.h> 20 #include <linux/kernel_stat.h> 21 #include <linux/mm.h> 22 #include <linux/swap.h> 23 #include <linux/mman.h> 24 #include <linux/pagemap.h> 25 #include <linux/file.h> 26 #include <linux/uio.h> 27 #include <linux/hash.h> 28 #include <linux/writeback.h> 29 #include <linux/pagevec.h> 30 #include <linux/blkdev.h> 31 #include <linux/security.h> 32 #include <linux/syscalls.h> 33 #include <linux/cpuset.h> 34 #include "filemap.h" 35 #include "internal.h" 36 37 /* 38 * FIXME: remove all knowledge of the buffer layer from the core VM 39 */ 40 #include <linux/buffer_head.h> /* for generic_osync_inode */ 41 42 #include <asm/mman.h> 43 44 static ssize_t 45 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 46 loff_t offset, unsigned long nr_segs); 47 48 /* 49 * Shared mappings implemented 30.11.1994. It's not fully working yet, 50 * though. 51 * 52 * Shared mappings now work. 15.8.1995 Bruno. 53 * 54 * finished 'unifying' the page and buffer cache and SMP-threaded the 55 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> 56 * 57 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> 58 */ 59 60 /* 61 * Lock ordering: 62 * 63 * ->i_mmap_lock (vmtruncate) 64 * ->private_lock (__free_pte->__set_page_dirty_buffers) 65 * ->swap_lock (exclusive_swap_page, others) 66 * ->mapping->tree_lock 67 * 68 * ->i_mutex 69 * ->i_mmap_lock (truncate->unmap_mapping_range) 70 * 71 * ->mmap_sem 72 * ->i_mmap_lock 73 * ->page_table_lock or pte_lock (various, mainly in memory.c) 74 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) 75 * 76 * ->mmap_sem 77 * ->lock_page (access_process_vm) 78 * 79 * ->mmap_sem 80 * ->i_mutex (msync) 81 * 82 * ->i_mutex 83 * ->i_alloc_sem (various) 84 * 85 * ->inode_lock 86 * ->sb_lock (fs/fs-writeback.c) 87 * ->mapping->tree_lock (__sync_single_inode) 88 * 89 * ->i_mmap_lock 90 * ->anon_vma.lock (vma_adjust) 91 * 92 * ->anon_vma.lock 93 * ->page_table_lock or pte_lock (anon_vma_prepare and various) 94 * 95 * ->page_table_lock or pte_lock 96 * ->swap_lock (try_to_unmap_one) 97 * ->private_lock (try_to_unmap_one) 98 * ->tree_lock (try_to_unmap_one) 99 * ->zone.lru_lock (follow_page->mark_page_accessed) 100 * ->zone.lru_lock (check_pte_range->isolate_lru_page) 101 * ->private_lock (page_remove_rmap->set_page_dirty) 102 * ->tree_lock (page_remove_rmap->set_page_dirty) 103 * ->inode_lock (page_remove_rmap->set_page_dirty) 104 * ->inode_lock (zap_pte_range->set_page_dirty) 105 * ->private_lock (zap_pte_range->__set_page_dirty_buffers) 106 * 107 * ->task->proc_lock 108 * ->dcache_lock (proc_pid_lookup) 109 */ 110 111 /* 112 * Remove a page from the page cache and free it. Caller has to make 113 * sure the page is locked and that nobody else uses it - or that usage 114 * is safe. The caller must hold a write_lock on the mapping's tree_lock. 115 */ 116 void __remove_from_page_cache(struct page *page) 117 { 118 struct address_space *mapping = page->mapping; 119 120 radix_tree_delete(&mapping->page_tree, page->index); 121 page->mapping = NULL; 122 mapping->nrpages--; 123 __dec_zone_page_state(page, NR_FILE_PAGES); 124 } 125 126 void remove_from_page_cache(struct page *page) 127 { 128 struct address_space *mapping = page->mapping; 129 130 BUG_ON(!PageLocked(page)); 131 132 write_lock_irq(&mapping->tree_lock); 133 __remove_from_page_cache(page); 134 write_unlock_irq(&mapping->tree_lock); 135 } 136 137 static int sync_page(void *word) 138 { 139 struct address_space *mapping; 140 struct page *page; 141 142 page = container_of((unsigned long *)word, struct page, flags); 143 144 /* 145 * page_mapping() is being called without PG_locked held. 146 * Some knowledge of the state and use of the page is used to 147 * reduce the requirements down to a memory barrier. 148 * The danger here is of a stale page_mapping() return value 149 * indicating a struct address_space different from the one it's 150 * associated with when it is associated with one. 151 * After smp_mb(), it's either the correct page_mapping() for 152 * the page, or an old page_mapping() and the page's own 153 * page_mapping() has gone NULL. 154 * The ->sync_page() address_space operation must tolerate 155 * page_mapping() going NULL. By an amazing coincidence, 156 * this comes about because none of the users of the page 157 * in the ->sync_page() methods make essential use of the 158 * page_mapping(), merely passing the page down to the backing 159 * device's unplug functions when it's non-NULL, which in turn 160 * ignore it for all cases but swap, where only page_private(page) is 161 * of interest. When page_mapping() does go NULL, the entire 162 * call stack gracefully ignores the page and returns. 163 * -- wli 164 */ 165 smp_mb(); 166 mapping = page_mapping(page); 167 if (mapping && mapping->a_ops && mapping->a_ops->sync_page) 168 mapping->a_ops->sync_page(page); 169 io_schedule(); 170 return 0; 171 } 172 173 /** 174 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range 175 * @mapping: address space structure to write 176 * @start: offset in bytes where the range starts 177 * @end: offset in bytes where the range ends (inclusive) 178 * @sync_mode: enable synchronous operation 179 * 180 * Start writeback against all of a mapping's dirty pages that lie 181 * within the byte offsets <start, end> inclusive. 182 * 183 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as 184 * opposed to a regular memory cleansing writeback. The difference between 185 * these two operations is that if a dirty page/buffer is encountered, it must 186 * be waited upon, and not just skipped over. 187 */ 188 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 189 loff_t end, int sync_mode) 190 { 191 int ret; 192 struct writeback_control wbc = { 193 .sync_mode = sync_mode, 194 .nr_to_write = mapping->nrpages * 2, 195 .range_start = start, 196 .range_end = end, 197 }; 198 199 if (!mapping_cap_writeback_dirty(mapping)) 200 return 0; 201 202 ret = do_writepages(mapping, &wbc); 203 return ret; 204 } 205 206 static inline int __filemap_fdatawrite(struct address_space *mapping, 207 int sync_mode) 208 { 209 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); 210 } 211 212 int filemap_fdatawrite(struct address_space *mapping) 213 { 214 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); 215 } 216 EXPORT_SYMBOL(filemap_fdatawrite); 217 218 static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 219 loff_t end) 220 { 221 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); 222 } 223 224 /** 225 * filemap_flush - mostly a non-blocking flush 226 * @mapping: target address_space 227 * 228 * This is a mostly non-blocking flush. Not suitable for data-integrity 229 * purposes - I/O may not be started against all dirty pages. 230 */ 231 int filemap_flush(struct address_space *mapping) 232 { 233 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); 234 } 235 EXPORT_SYMBOL(filemap_flush); 236 237 /** 238 * wait_on_page_writeback_range - wait for writeback to complete 239 * @mapping: target address_space 240 * @start: beginning page index 241 * @end: ending page index 242 * 243 * Wait for writeback to complete against pages indexed by start->end 244 * inclusive 245 */ 246 int wait_on_page_writeback_range(struct address_space *mapping, 247 pgoff_t start, pgoff_t end) 248 { 249 struct pagevec pvec; 250 int nr_pages; 251 int ret = 0; 252 pgoff_t index; 253 254 if (end < start) 255 return 0; 256 257 pagevec_init(&pvec, 0); 258 index = start; 259 while ((index <= end) && 260 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, 261 PAGECACHE_TAG_WRITEBACK, 262 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) { 263 unsigned i; 264 265 for (i = 0; i < nr_pages; i++) { 266 struct page *page = pvec.pages[i]; 267 268 /* until radix tree lookup accepts end_index */ 269 if (page->index > end) 270 continue; 271 272 wait_on_page_writeback(page); 273 if (PageError(page)) 274 ret = -EIO; 275 } 276 pagevec_release(&pvec); 277 cond_resched(); 278 } 279 280 /* Check for outstanding write errors */ 281 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) 282 ret = -ENOSPC; 283 if (test_and_clear_bit(AS_EIO, &mapping->flags)) 284 ret = -EIO; 285 286 return ret; 287 } 288 289 /** 290 * sync_page_range - write and wait on all pages in the passed range 291 * @inode: target inode 292 * @mapping: target address_space 293 * @pos: beginning offset in pages to write 294 * @count: number of bytes to write 295 * 296 * Write and wait upon all the pages in the passed range. This is a "data 297 * integrity" operation. It waits upon in-flight writeout before starting and 298 * waiting upon new writeout. If there was an IO error, return it. 299 * 300 * We need to re-take i_mutex during the generic_osync_inode list walk because 301 * it is otherwise livelockable. 302 */ 303 int sync_page_range(struct inode *inode, struct address_space *mapping, 304 loff_t pos, loff_t count) 305 { 306 pgoff_t start = pos >> PAGE_CACHE_SHIFT; 307 pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; 308 int ret; 309 310 if (!mapping_cap_writeback_dirty(mapping) || !count) 311 return 0; 312 ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1); 313 if (ret == 0) { 314 mutex_lock(&inode->i_mutex); 315 ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); 316 mutex_unlock(&inode->i_mutex); 317 } 318 if (ret == 0) 319 ret = wait_on_page_writeback_range(mapping, start, end); 320 return ret; 321 } 322 EXPORT_SYMBOL(sync_page_range); 323 324 /** 325 * sync_page_range_nolock 326 * @inode: target inode 327 * @mapping: target address_space 328 * @pos: beginning offset in pages to write 329 * @count: number of bytes to write 330 * 331 * Note: Holding i_mutex across sync_page_range_nolock is not a good idea 332 * as it forces O_SYNC writers to different parts of the same file 333 * to be serialised right until io completion. 334 */ 335 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, 336 loff_t pos, loff_t count) 337 { 338 pgoff_t start = pos >> PAGE_CACHE_SHIFT; 339 pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; 340 int ret; 341 342 if (!mapping_cap_writeback_dirty(mapping) || !count) 343 return 0; 344 ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1); 345 if (ret == 0) 346 ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); 347 if (ret == 0) 348 ret = wait_on_page_writeback_range(mapping, start, end); 349 return ret; 350 } 351 EXPORT_SYMBOL(sync_page_range_nolock); 352 353 /** 354 * filemap_fdatawait - wait for all under-writeback pages to complete 355 * @mapping: address space structure to wait for 356 * 357 * Walk the list of under-writeback pages of the given address space 358 * and wait for all of them. 359 */ 360 int filemap_fdatawait(struct address_space *mapping) 361 { 362 loff_t i_size = i_size_read(mapping->host); 363 364 if (i_size == 0) 365 return 0; 366 367 return wait_on_page_writeback_range(mapping, 0, 368 (i_size - 1) >> PAGE_CACHE_SHIFT); 369 } 370 EXPORT_SYMBOL(filemap_fdatawait); 371 372 int filemap_write_and_wait(struct address_space *mapping) 373 { 374 int err = 0; 375 376 if (mapping->nrpages) { 377 err = filemap_fdatawrite(mapping); 378 /* 379 * Even if the above returned error, the pages may be 380 * written partially (e.g. -ENOSPC), so we wait for it. 381 * But the -EIO is special case, it may indicate the worst 382 * thing (e.g. bug) happened, so we avoid waiting for it. 383 */ 384 if (err != -EIO) { 385 int err2 = filemap_fdatawait(mapping); 386 if (!err) 387 err = err2; 388 } 389 } 390 return err; 391 } 392 EXPORT_SYMBOL(filemap_write_and_wait); 393 394 /** 395 * filemap_write_and_wait_range - write out & wait on a file range 396 * @mapping: the address_space for the pages 397 * @lstart: offset in bytes where the range starts 398 * @lend: offset in bytes where the range ends (inclusive) 399 * 400 * Write out and wait upon file offsets lstart->lend, inclusive. 401 * 402 * Note that `lend' is inclusive (describes the last byte to be written) so 403 * that this function can be used to write to the very end-of-file (end = -1). 404 */ 405 int filemap_write_and_wait_range(struct address_space *mapping, 406 loff_t lstart, loff_t lend) 407 { 408 int err = 0; 409 410 if (mapping->nrpages) { 411 err = __filemap_fdatawrite_range(mapping, lstart, lend, 412 WB_SYNC_ALL); 413 /* See comment of filemap_write_and_wait() */ 414 if (err != -EIO) { 415 int err2 = wait_on_page_writeback_range(mapping, 416 lstart >> PAGE_CACHE_SHIFT, 417 lend >> PAGE_CACHE_SHIFT); 418 if (!err) 419 err = err2; 420 } 421 } 422 return err; 423 } 424 425 /** 426 * add_to_page_cache - add newly allocated pagecache pages 427 * @page: page to add 428 * @mapping: the page's address_space 429 * @offset: page index 430 * @gfp_mask: page allocation mode 431 * 432 * This function is used to add newly allocated pagecache pages; 433 * the page is new, so we can just run SetPageLocked() against it. 434 * The other page state flags were set by rmqueue(). 435 * 436 * This function does not add the page to the LRU. The caller must do that. 437 */ 438 int add_to_page_cache(struct page *page, struct address_space *mapping, 439 pgoff_t offset, gfp_t gfp_mask) 440 { 441 int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); 442 443 if (error == 0) { 444 write_lock_irq(&mapping->tree_lock); 445 error = radix_tree_insert(&mapping->page_tree, offset, page); 446 if (!error) { 447 page_cache_get(page); 448 SetPageLocked(page); 449 page->mapping = mapping; 450 page->index = offset; 451 mapping->nrpages++; 452 __inc_zone_page_state(page, NR_FILE_PAGES); 453 } 454 write_unlock_irq(&mapping->tree_lock); 455 radix_tree_preload_end(); 456 } 457 return error; 458 } 459 EXPORT_SYMBOL(add_to_page_cache); 460 461 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 462 pgoff_t offset, gfp_t gfp_mask) 463 { 464 int ret = add_to_page_cache(page, mapping, offset, gfp_mask); 465 if (ret == 0) 466 lru_cache_add(page); 467 return ret; 468 } 469 470 #ifdef CONFIG_NUMA 471 struct page *page_cache_alloc(struct address_space *x) 472 { 473 if (cpuset_do_page_mem_spread()) { 474 int n = cpuset_mem_spread_node(); 475 return alloc_pages_node(n, mapping_gfp_mask(x), 0); 476 } 477 return alloc_pages(mapping_gfp_mask(x), 0); 478 } 479 EXPORT_SYMBOL(page_cache_alloc); 480 481 struct page *page_cache_alloc_cold(struct address_space *x) 482 { 483 if (cpuset_do_page_mem_spread()) { 484 int n = cpuset_mem_spread_node(); 485 return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0); 486 } 487 return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0); 488 } 489 EXPORT_SYMBOL(page_cache_alloc_cold); 490 #endif 491 492 /* 493 * In order to wait for pages to become available there must be 494 * waitqueues associated with pages. By using a hash table of 495 * waitqueues where the bucket discipline is to maintain all 496 * waiters on the same queue and wake all when any of the pages 497 * become available, and for the woken contexts to check to be 498 * sure the appropriate page became available, this saves space 499 * at a cost of "thundering herd" phenomena during rare hash 500 * collisions. 501 */ 502 static wait_queue_head_t *page_waitqueue(struct page *page) 503 { 504 const struct zone *zone = page_zone(page); 505 506 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; 507 } 508 509 static inline void wake_up_page(struct page *page, int bit) 510 { 511 __wake_up_bit(page_waitqueue(page), &page->flags, bit); 512 } 513 514 void fastcall wait_on_page_bit(struct page *page, int bit_nr) 515 { 516 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); 517 518 if (test_bit(bit_nr, &page->flags)) 519 __wait_on_bit(page_waitqueue(page), &wait, sync_page, 520 TASK_UNINTERRUPTIBLE); 521 } 522 EXPORT_SYMBOL(wait_on_page_bit); 523 524 /** 525 * unlock_page - unlock a locked page 526 * @page: the page 527 * 528 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). 529 * Also wakes sleepers in wait_on_page_writeback() because the wakeup 530 * mechananism between PageLocked pages and PageWriteback pages is shared. 531 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. 532 * 533 * The first mb is necessary to safely close the critical section opened by the 534 * TestSetPageLocked(), the second mb is necessary to enforce ordering between 535 * the clear_bit and the read of the waitqueue (to avoid SMP races with a 536 * parallel wait_on_page_locked()). 537 */ 538 void fastcall unlock_page(struct page *page) 539 { 540 smp_mb__before_clear_bit(); 541 if (!TestClearPageLocked(page)) 542 BUG(); 543 smp_mb__after_clear_bit(); 544 wake_up_page(page, PG_locked); 545 } 546 EXPORT_SYMBOL(unlock_page); 547 548 /** 549 * end_page_writeback - end writeback against a page 550 * @page: the page 551 */ 552 void end_page_writeback(struct page *page) 553 { 554 if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) { 555 if (!test_clear_page_writeback(page)) 556 BUG(); 557 } 558 smp_mb__after_clear_bit(); 559 wake_up_page(page, PG_writeback); 560 } 561 EXPORT_SYMBOL(end_page_writeback); 562 563 /** 564 * __lock_page - get a lock on the page, assuming we need to sleep to get it 565 * @page: the page to lock 566 * 567 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some 568 * random driver's requestfn sets TASK_RUNNING, we could busywait. However 569 * chances are that on the second loop, the block layer's plug list is empty, 570 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. 571 */ 572 void fastcall __lock_page(struct page *page) 573 { 574 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); 575 576 __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, 577 TASK_UNINTERRUPTIBLE); 578 } 579 EXPORT_SYMBOL(__lock_page); 580 581 /** 582 * find_get_page - find and get a page reference 583 * @mapping: the address_space to search 584 * @offset: the page index 585 * 586 * A rather lightweight function, finding and getting a reference to a 587 * hashed page atomically. 588 */ 589 struct page * find_get_page(struct address_space *mapping, unsigned long offset) 590 { 591 struct page *page; 592 593 read_lock_irq(&mapping->tree_lock); 594 page = radix_tree_lookup(&mapping->page_tree, offset); 595 if (page) 596 page_cache_get(page); 597 read_unlock_irq(&mapping->tree_lock); 598 return page; 599 } 600 EXPORT_SYMBOL(find_get_page); 601 602 /** 603 * find_trylock_page - find and lock a page 604 * @mapping: the address_space to search 605 * @offset: the page index 606 * 607 * Same as find_get_page(), but trylock it instead of incrementing the count. 608 */ 609 struct page *find_trylock_page(struct address_space *mapping, unsigned long offset) 610 { 611 struct page *page; 612 613 read_lock_irq(&mapping->tree_lock); 614 page = radix_tree_lookup(&mapping->page_tree, offset); 615 if (page && TestSetPageLocked(page)) 616 page = NULL; 617 read_unlock_irq(&mapping->tree_lock); 618 return page; 619 } 620 EXPORT_SYMBOL(find_trylock_page); 621 622 /** 623 * find_lock_page - locate, pin and lock a pagecache page 624 * @mapping: the address_space to search 625 * @offset: the page index 626 * 627 * Locates the desired pagecache page, locks it, increments its reference 628 * count and returns its address. 629 * 630 * Returns zero if the page was not present. find_lock_page() may sleep. 631 */ 632 struct page *find_lock_page(struct address_space *mapping, 633 unsigned long offset) 634 { 635 struct page *page; 636 637 read_lock_irq(&mapping->tree_lock); 638 repeat: 639 page = radix_tree_lookup(&mapping->page_tree, offset); 640 if (page) { 641 page_cache_get(page); 642 if (TestSetPageLocked(page)) { 643 read_unlock_irq(&mapping->tree_lock); 644 __lock_page(page); 645 read_lock_irq(&mapping->tree_lock); 646 647 /* Has the page been truncated while we slept? */ 648 if (unlikely(page->mapping != mapping || 649 page->index != offset)) { 650 unlock_page(page); 651 page_cache_release(page); 652 goto repeat; 653 } 654 } 655 } 656 read_unlock_irq(&mapping->tree_lock); 657 return page; 658 } 659 EXPORT_SYMBOL(find_lock_page); 660 661 /** 662 * find_or_create_page - locate or add a pagecache page 663 * @mapping: the page's address_space 664 * @index: the page's index into the mapping 665 * @gfp_mask: page allocation mode 666 * 667 * Locates a page in the pagecache. If the page is not present, a new page 668 * is allocated using @gfp_mask and is added to the pagecache and to the VM's 669 * LRU list. The returned page is locked and has its reference count 670 * incremented. 671 * 672 * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic 673 * allocation! 674 * 675 * find_or_create_page() returns the desired page's address, or zero on 676 * memory exhaustion. 677 */ 678 struct page *find_or_create_page(struct address_space *mapping, 679 unsigned long index, gfp_t gfp_mask) 680 { 681 struct page *page, *cached_page = NULL; 682 int err; 683 repeat: 684 page = find_lock_page(mapping, index); 685 if (!page) { 686 if (!cached_page) { 687 cached_page = alloc_page(gfp_mask); 688 if (!cached_page) 689 return NULL; 690 } 691 err = add_to_page_cache_lru(cached_page, mapping, 692 index, gfp_mask); 693 if (!err) { 694 page = cached_page; 695 cached_page = NULL; 696 } else if (err == -EEXIST) 697 goto repeat; 698 } 699 if (cached_page) 700 page_cache_release(cached_page); 701 return page; 702 } 703 EXPORT_SYMBOL(find_or_create_page); 704 705 /** 706 * find_get_pages - gang pagecache lookup 707 * @mapping: The address_space to search 708 * @start: The starting page index 709 * @nr_pages: The maximum number of pages 710 * @pages: Where the resulting pages are placed 711 * 712 * find_get_pages() will search for and return a group of up to 713 * @nr_pages pages in the mapping. The pages are placed at @pages. 714 * find_get_pages() takes a reference against the returned pages. 715 * 716 * The search returns a group of mapping-contiguous pages with ascending 717 * indexes. There may be holes in the indices due to not-present pages. 718 * 719 * find_get_pages() returns the number of pages which were found. 720 */ 721 unsigned find_get_pages(struct address_space *mapping, pgoff_t start, 722 unsigned int nr_pages, struct page **pages) 723 { 724 unsigned int i; 725 unsigned int ret; 726 727 read_lock_irq(&mapping->tree_lock); 728 ret = radix_tree_gang_lookup(&mapping->page_tree, 729 (void **)pages, start, nr_pages); 730 for (i = 0; i < ret; i++) 731 page_cache_get(pages[i]); 732 read_unlock_irq(&mapping->tree_lock); 733 return ret; 734 } 735 736 /** 737 * find_get_pages_contig - gang contiguous pagecache lookup 738 * @mapping: The address_space to search 739 * @index: The starting page index 740 * @nr_pages: The maximum number of pages 741 * @pages: Where the resulting pages are placed 742 * 743 * find_get_pages_contig() works exactly like find_get_pages(), except 744 * that the returned number of pages are guaranteed to be contiguous. 745 * 746 * find_get_pages_contig() returns the number of pages which were found. 747 */ 748 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, 749 unsigned int nr_pages, struct page **pages) 750 { 751 unsigned int i; 752 unsigned int ret; 753 754 read_lock_irq(&mapping->tree_lock); 755 ret = radix_tree_gang_lookup(&mapping->page_tree, 756 (void **)pages, index, nr_pages); 757 for (i = 0; i < ret; i++) { 758 if (pages[i]->mapping == NULL || pages[i]->index != index) 759 break; 760 761 page_cache_get(pages[i]); 762 index++; 763 } 764 read_unlock_irq(&mapping->tree_lock); 765 return i; 766 } 767 768 /** 769 * find_get_pages_tag - find and return pages that match @tag 770 * @mapping: the address_space to search 771 * @index: the starting page index 772 * @tag: the tag index 773 * @nr_pages: the maximum number of pages 774 * @pages: where the resulting pages are placed 775 * 776 * Like find_get_pages, except we only return pages which are tagged with 777 * @tag. We update @index to index the next page for the traversal. 778 */ 779 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, 780 int tag, unsigned int nr_pages, struct page **pages) 781 { 782 unsigned int i; 783 unsigned int ret; 784 785 read_lock_irq(&mapping->tree_lock); 786 ret = radix_tree_gang_lookup_tag(&mapping->page_tree, 787 (void **)pages, *index, nr_pages, tag); 788 for (i = 0; i < ret; i++) 789 page_cache_get(pages[i]); 790 if (ret) 791 *index = pages[ret - 1]->index + 1; 792 read_unlock_irq(&mapping->tree_lock); 793 return ret; 794 } 795 796 /** 797 * grab_cache_page_nowait - returns locked page at given index in given cache 798 * @mapping: target address_space 799 * @index: the page index 800 * 801 * Same as grab_cache_page, but do not wait if the page is unavailable. 802 * This is intended for speculative data generators, where the data can 803 * be regenerated if the page couldn't be grabbed. This routine should 804 * be safe to call while holding the lock for another page. 805 * 806 * Clear __GFP_FS when allocating the page to avoid recursion into the fs 807 * and deadlock against the caller's locked page. 808 */ 809 struct page * 810 grab_cache_page_nowait(struct address_space *mapping, unsigned long index) 811 { 812 struct page *page = find_get_page(mapping, index); 813 gfp_t gfp_mask; 814 815 if (page) { 816 if (!TestSetPageLocked(page)) 817 return page; 818 page_cache_release(page); 819 return NULL; 820 } 821 gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS; 822 page = alloc_pages(gfp_mask, 0); 823 if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) { 824 page_cache_release(page); 825 page = NULL; 826 } 827 return page; 828 } 829 EXPORT_SYMBOL(grab_cache_page_nowait); 830 831 /* 832 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 833 * a _large_ part of the i/o request. Imagine the worst scenario: 834 * 835 * ---R__________________________________________B__________ 836 * ^ reading here ^ bad block(assume 4k) 837 * 838 * read(R) => miss => readahead(R...B) => media error => frustrating retries 839 * => failing the whole request => read(R) => read(R+1) => 840 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 841 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 842 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 843 * 844 * It is going insane. Fix it by quickly scaling down the readahead size. 845 */ 846 static void shrink_readahead_size_eio(struct file *filp, 847 struct file_ra_state *ra) 848 { 849 if (!ra->ra_pages) 850 return; 851 852 ra->ra_pages /= 4; 853 printk(KERN_WARNING "Reducing readahead size to %luK\n", 854 ra->ra_pages << (PAGE_CACHE_SHIFT - 10)); 855 } 856 857 /** 858 * do_generic_mapping_read - generic file read routine 859 * @mapping: address_space to be read 860 * @_ra: file's readahead state 861 * @filp: the file to read 862 * @ppos: current file position 863 * @desc: read_descriptor 864 * @actor: read method 865 * 866 * This is a generic file read routine, and uses the 867 * mapping->a_ops->readpage() function for the actual low-level stuff. 868 * 869 * This is really ugly. But the goto's actually try to clarify some 870 * of the logic when it comes to error handling etc. 871 * 872 * Note the struct file* is only passed for the use of readpage. 873 * It may be NULL. 874 */ 875 void do_generic_mapping_read(struct address_space *mapping, 876 struct file_ra_state *_ra, 877 struct file *filp, 878 loff_t *ppos, 879 read_descriptor_t *desc, 880 read_actor_t actor) 881 { 882 struct inode *inode = mapping->host; 883 unsigned long index; 884 unsigned long end_index; 885 unsigned long offset; 886 unsigned long last_index; 887 unsigned long next_index; 888 unsigned long prev_index; 889 loff_t isize; 890 struct page *cached_page; 891 int error; 892 struct file_ra_state ra = *_ra; 893 894 cached_page = NULL; 895 index = *ppos >> PAGE_CACHE_SHIFT; 896 next_index = index; 897 prev_index = ra.prev_page; 898 last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; 899 offset = *ppos & ~PAGE_CACHE_MASK; 900 901 isize = i_size_read(inode); 902 if (!isize) 903 goto out; 904 905 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 906 for (;;) { 907 struct page *page; 908 unsigned long nr, ret; 909 910 /* nr is the maximum number of bytes to copy from this page */ 911 nr = PAGE_CACHE_SIZE; 912 if (index >= end_index) { 913 if (index > end_index) 914 goto out; 915 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 916 if (nr <= offset) { 917 goto out; 918 } 919 } 920 nr = nr - offset; 921 922 cond_resched(); 923 if (index == next_index) 924 next_index = page_cache_readahead(mapping, &ra, filp, 925 index, last_index - index); 926 927 find_page: 928 page = find_get_page(mapping, index); 929 if (unlikely(page == NULL)) { 930 handle_ra_miss(mapping, &ra, index); 931 goto no_cached_page; 932 } 933 if (!PageUptodate(page)) 934 goto page_not_up_to_date; 935 page_ok: 936 937 /* If users can be writing to this page using arbitrary 938 * virtual addresses, take care about potential aliasing 939 * before reading the page on the kernel side. 940 */ 941 if (mapping_writably_mapped(mapping)) 942 flush_dcache_page(page); 943 944 /* 945 * When (part of) the same page is read multiple times 946 * in succession, only mark it as accessed the first time. 947 */ 948 if (prev_index != index) 949 mark_page_accessed(page); 950 prev_index = index; 951 952 /* 953 * Ok, we have the page, and it's up-to-date, so 954 * now we can copy it to user space... 955 * 956 * The actor routine returns how many bytes were actually used.. 957 * NOTE! This may not be the same as how much of a user buffer 958 * we filled up (we may be padding etc), so we can only update 959 * "pos" here (the actor routine has to update the user buffer 960 * pointers and the remaining count). 961 */ 962 ret = actor(desc, page, offset, nr); 963 offset += ret; 964 index += offset >> PAGE_CACHE_SHIFT; 965 offset &= ~PAGE_CACHE_MASK; 966 967 page_cache_release(page); 968 if (ret == nr && desc->count) 969 continue; 970 goto out; 971 972 page_not_up_to_date: 973 /* Get exclusive access to the page ... */ 974 lock_page(page); 975 976 /* Did it get unhashed before we got the lock? */ 977 if (!page->mapping) { 978 unlock_page(page); 979 page_cache_release(page); 980 continue; 981 } 982 983 /* Did somebody else fill it already? */ 984 if (PageUptodate(page)) { 985 unlock_page(page); 986 goto page_ok; 987 } 988 989 readpage: 990 /* Start the actual read. The read will unlock the page. */ 991 error = mapping->a_ops->readpage(filp, page); 992 993 if (unlikely(error)) { 994 if (error == AOP_TRUNCATED_PAGE) { 995 page_cache_release(page); 996 goto find_page; 997 } 998 goto readpage_error; 999 } 1000 1001 if (!PageUptodate(page)) { 1002 lock_page(page); 1003 if (!PageUptodate(page)) { 1004 if (page->mapping == NULL) { 1005 /* 1006 * invalidate_inode_pages got it 1007 */ 1008 unlock_page(page); 1009 page_cache_release(page); 1010 goto find_page; 1011 } 1012 unlock_page(page); 1013 error = -EIO; 1014 shrink_readahead_size_eio(filp, &ra); 1015 goto readpage_error; 1016 } 1017 unlock_page(page); 1018 } 1019 1020 /* 1021 * i_size must be checked after we have done ->readpage. 1022 * 1023 * Checking i_size after the readpage allows us to calculate 1024 * the correct value for "nr", which means the zero-filled 1025 * part of the page is not copied back to userspace (unless 1026 * another truncate extends the file - this is desired though). 1027 */ 1028 isize = i_size_read(inode); 1029 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1030 if (unlikely(!isize || index > end_index)) { 1031 page_cache_release(page); 1032 goto out; 1033 } 1034 1035 /* nr is the maximum number of bytes to copy from this page */ 1036 nr = PAGE_CACHE_SIZE; 1037 if (index == end_index) { 1038 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1039 if (nr <= offset) { 1040 page_cache_release(page); 1041 goto out; 1042 } 1043 } 1044 nr = nr - offset; 1045 goto page_ok; 1046 1047 readpage_error: 1048 /* UHHUH! A synchronous read error occurred. Report it */ 1049 desc->error = error; 1050 page_cache_release(page); 1051 goto out; 1052 1053 no_cached_page: 1054 /* 1055 * Ok, it wasn't cached, so we need to create a new 1056 * page.. 1057 */ 1058 if (!cached_page) { 1059 cached_page = page_cache_alloc_cold(mapping); 1060 if (!cached_page) { 1061 desc->error = -ENOMEM; 1062 goto out; 1063 } 1064 } 1065 error = add_to_page_cache_lru(cached_page, mapping, 1066 index, GFP_KERNEL); 1067 if (error) { 1068 if (error == -EEXIST) 1069 goto find_page; 1070 desc->error = error; 1071 goto out; 1072 } 1073 page = cached_page; 1074 cached_page = NULL; 1075 goto readpage; 1076 } 1077 1078 out: 1079 *_ra = ra; 1080 1081 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 1082 if (cached_page) 1083 page_cache_release(cached_page); 1084 if (filp) 1085 file_accessed(filp); 1086 } 1087 EXPORT_SYMBOL(do_generic_mapping_read); 1088 1089 int file_read_actor(read_descriptor_t *desc, struct page *page, 1090 unsigned long offset, unsigned long size) 1091 { 1092 char *kaddr; 1093 unsigned long left, count = desc->count; 1094 1095 if (size > count) 1096 size = count; 1097 1098 /* 1099 * Faults on the destination of a read are common, so do it before 1100 * taking the kmap. 1101 */ 1102 if (!fault_in_pages_writeable(desc->arg.buf, size)) { 1103 kaddr = kmap_atomic(page, KM_USER0); 1104 left = __copy_to_user_inatomic(desc->arg.buf, 1105 kaddr + offset, size); 1106 kunmap_atomic(kaddr, KM_USER0); 1107 if (left == 0) 1108 goto success; 1109 } 1110 1111 /* Do it the slow way */ 1112 kaddr = kmap(page); 1113 left = __copy_to_user(desc->arg.buf, kaddr + offset, size); 1114 kunmap(page); 1115 1116 if (left) { 1117 size -= left; 1118 desc->error = -EFAULT; 1119 } 1120 success: 1121 desc->count = count - size; 1122 desc->written += size; 1123 desc->arg.buf += size; 1124 return size; 1125 } 1126 1127 /** 1128 * __generic_file_aio_read - generic filesystem read routine 1129 * @iocb: kernel I/O control block 1130 * @iov: io vector request 1131 * @nr_segs: number of segments in the iovec 1132 * @ppos: current file position 1133 * 1134 * This is the "read()" routine for all filesystems 1135 * that can use the page cache directly. 1136 */ 1137 ssize_t 1138 __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, 1139 unsigned long nr_segs, loff_t *ppos) 1140 { 1141 struct file *filp = iocb->ki_filp; 1142 ssize_t retval; 1143 unsigned long seg; 1144 size_t count; 1145 1146 count = 0; 1147 for (seg = 0; seg < nr_segs; seg++) { 1148 const struct iovec *iv = &iov[seg]; 1149 1150 /* 1151 * If any segment has a negative length, or the cumulative 1152 * length ever wraps negative then return -EINVAL. 1153 */ 1154 count += iv->iov_len; 1155 if (unlikely((ssize_t)(count|iv->iov_len) < 0)) 1156 return -EINVAL; 1157 if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len)) 1158 continue; 1159 if (seg == 0) 1160 return -EFAULT; 1161 nr_segs = seg; 1162 count -= iv->iov_len; /* This segment is no good */ 1163 break; 1164 } 1165 1166 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 1167 if (filp->f_flags & O_DIRECT) { 1168 loff_t pos = *ppos, size; 1169 struct address_space *mapping; 1170 struct inode *inode; 1171 1172 mapping = filp->f_mapping; 1173 inode = mapping->host; 1174 retval = 0; 1175 if (!count) 1176 goto out; /* skip atime */ 1177 size = i_size_read(inode); 1178 if (pos < size) { 1179 retval = generic_file_direct_IO(READ, iocb, 1180 iov, pos, nr_segs); 1181 if (retval > 0 && !is_sync_kiocb(iocb)) 1182 retval = -EIOCBQUEUED; 1183 if (retval > 0) 1184 *ppos = pos + retval; 1185 } 1186 file_accessed(filp); 1187 goto out; 1188 } 1189 1190 retval = 0; 1191 if (count) { 1192 for (seg = 0; seg < nr_segs; seg++) { 1193 read_descriptor_t desc; 1194 1195 desc.written = 0; 1196 desc.arg.buf = iov[seg].iov_base; 1197 desc.count = iov[seg].iov_len; 1198 if (desc.count == 0) 1199 continue; 1200 desc.error = 0; 1201 do_generic_file_read(filp,ppos,&desc,file_read_actor); 1202 retval += desc.written; 1203 if (desc.error) { 1204 retval = retval ?: desc.error; 1205 break; 1206 } 1207 } 1208 } 1209 out: 1210 return retval; 1211 } 1212 EXPORT_SYMBOL(__generic_file_aio_read); 1213 1214 ssize_t 1215 generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) 1216 { 1217 struct iovec local_iov = { .iov_base = buf, .iov_len = count }; 1218 1219 BUG_ON(iocb->ki_pos != pos); 1220 return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos); 1221 } 1222 EXPORT_SYMBOL(generic_file_aio_read); 1223 1224 ssize_t 1225 generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) 1226 { 1227 struct iovec local_iov = { .iov_base = buf, .iov_len = count }; 1228 struct kiocb kiocb; 1229 ssize_t ret; 1230 1231 init_sync_kiocb(&kiocb, filp); 1232 ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos); 1233 if (-EIOCBQUEUED == ret) 1234 ret = wait_on_sync_kiocb(&kiocb); 1235 return ret; 1236 } 1237 EXPORT_SYMBOL(generic_file_read); 1238 1239 int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size) 1240 { 1241 ssize_t written; 1242 unsigned long count = desc->count; 1243 struct file *file = desc->arg.data; 1244 1245 if (size > count) 1246 size = count; 1247 1248 written = file->f_op->sendpage(file, page, offset, 1249 size, &file->f_pos, size<count); 1250 if (written < 0) { 1251 desc->error = written; 1252 written = 0; 1253 } 1254 desc->count = count - written; 1255 desc->written += written; 1256 return written; 1257 } 1258 1259 ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos, 1260 size_t count, read_actor_t actor, void *target) 1261 { 1262 read_descriptor_t desc; 1263 1264 if (!count) 1265 return 0; 1266 1267 desc.written = 0; 1268 desc.count = count; 1269 desc.arg.data = target; 1270 desc.error = 0; 1271 1272 do_generic_file_read(in_file, ppos, &desc, actor); 1273 if (desc.written) 1274 return desc.written; 1275 return desc.error; 1276 } 1277 EXPORT_SYMBOL(generic_file_sendfile); 1278 1279 static ssize_t 1280 do_readahead(struct address_space *mapping, struct file *filp, 1281 unsigned long index, unsigned long nr) 1282 { 1283 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage) 1284 return -EINVAL; 1285 1286 force_page_cache_readahead(mapping, filp, index, 1287 max_sane_readahead(nr)); 1288 return 0; 1289 } 1290 1291 asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count) 1292 { 1293 ssize_t ret; 1294 struct file *file; 1295 1296 ret = -EBADF; 1297 file = fget(fd); 1298 if (file) { 1299 if (file->f_mode & FMODE_READ) { 1300 struct address_space *mapping = file->f_mapping; 1301 unsigned long start = offset >> PAGE_CACHE_SHIFT; 1302 unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT; 1303 unsigned long len = end - start + 1; 1304 ret = do_readahead(mapping, file, start, len); 1305 } 1306 fput(file); 1307 } 1308 return ret; 1309 } 1310 1311 #ifdef CONFIG_MMU 1312 static int FASTCALL(page_cache_read(struct file * file, unsigned long offset)); 1313 /** 1314 * page_cache_read - adds requested page to the page cache if not already there 1315 * @file: file to read 1316 * @offset: page index 1317 * 1318 * This adds the requested page to the page cache if it isn't already there, 1319 * and schedules an I/O to read in its contents from disk. 1320 */ 1321 static int fastcall page_cache_read(struct file * file, unsigned long offset) 1322 { 1323 struct address_space *mapping = file->f_mapping; 1324 struct page *page; 1325 int ret; 1326 1327 do { 1328 page = page_cache_alloc_cold(mapping); 1329 if (!page) 1330 return -ENOMEM; 1331 1332 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL); 1333 if (ret == 0) 1334 ret = mapping->a_ops->readpage(file, page); 1335 else if (ret == -EEXIST) 1336 ret = 0; /* losing race to add is OK */ 1337 1338 page_cache_release(page); 1339 1340 } while (ret == AOP_TRUNCATED_PAGE); 1341 1342 return ret; 1343 } 1344 1345 #define MMAP_LOTSAMISS (100) 1346 1347 /** 1348 * filemap_nopage - read in file data for page fault handling 1349 * @area: the applicable vm_area 1350 * @address: target address to read in 1351 * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL 1352 * 1353 * filemap_nopage() is invoked via the vma operations vector for a 1354 * mapped memory region to read in file data during a page fault. 1355 * 1356 * The goto's are kind of ugly, but this streamlines the normal case of having 1357 * it in the page cache, and handles the special cases reasonably without 1358 * having a lot of duplicated code. 1359 */ 1360 struct page *filemap_nopage(struct vm_area_struct *area, 1361 unsigned long address, int *type) 1362 { 1363 int error; 1364 struct file *file = area->vm_file; 1365 struct address_space *mapping = file->f_mapping; 1366 struct file_ra_state *ra = &file->f_ra; 1367 struct inode *inode = mapping->host; 1368 struct page *page; 1369 unsigned long size, pgoff; 1370 int did_readaround = 0, majmin = VM_FAULT_MINOR; 1371 1372 pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff; 1373 1374 retry_all: 1375 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1376 if (pgoff >= size) 1377 goto outside_data_content; 1378 1379 /* If we don't want any read-ahead, don't bother */ 1380 if (VM_RandomReadHint(area)) 1381 goto no_cached_page; 1382 1383 /* 1384 * The readahead code wants to be told about each and every page 1385 * so it can build and shrink its windows appropriately 1386 * 1387 * For sequential accesses, we use the generic readahead logic. 1388 */ 1389 if (VM_SequentialReadHint(area)) 1390 page_cache_readahead(mapping, ra, file, pgoff, 1); 1391 1392 /* 1393 * Do we have something in the page cache already? 1394 */ 1395 retry_find: 1396 page = find_get_page(mapping, pgoff); 1397 if (!page) { 1398 unsigned long ra_pages; 1399 1400 if (VM_SequentialReadHint(area)) { 1401 handle_ra_miss(mapping, ra, pgoff); 1402 goto no_cached_page; 1403 } 1404 ra->mmap_miss++; 1405 1406 /* 1407 * Do we miss much more than hit in this file? If so, 1408 * stop bothering with read-ahead. It will only hurt. 1409 */ 1410 if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS) 1411 goto no_cached_page; 1412 1413 /* 1414 * To keep the pgmajfault counter straight, we need to 1415 * check did_readaround, as this is an inner loop. 1416 */ 1417 if (!did_readaround) { 1418 majmin = VM_FAULT_MAJOR; 1419 count_vm_event(PGMAJFAULT); 1420 } 1421 did_readaround = 1; 1422 ra_pages = max_sane_readahead(file->f_ra.ra_pages); 1423 if (ra_pages) { 1424 pgoff_t start = 0; 1425 1426 if (pgoff > ra_pages / 2) 1427 start = pgoff - ra_pages / 2; 1428 do_page_cache_readahead(mapping, file, start, ra_pages); 1429 } 1430 page = find_get_page(mapping, pgoff); 1431 if (!page) 1432 goto no_cached_page; 1433 } 1434 1435 if (!did_readaround) 1436 ra->mmap_hit++; 1437 1438 /* 1439 * Ok, found a page in the page cache, now we need to check 1440 * that it's up-to-date. 1441 */ 1442 if (!PageUptodate(page)) 1443 goto page_not_uptodate; 1444 1445 success: 1446 /* 1447 * Found the page and have a reference on it. 1448 */ 1449 mark_page_accessed(page); 1450 if (type) 1451 *type = majmin; 1452 return page; 1453 1454 outside_data_content: 1455 /* 1456 * An external ptracer can access pages that normally aren't 1457 * accessible.. 1458 */ 1459 if (area->vm_mm == current->mm) 1460 return NULL; 1461 /* Fall through to the non-read-ahead case */ 1462 no_cached_page: 1463 /* 1464 * We're only likely to ever get here if MADV_RANDOM is in 1465 * effect. 1466 */ 1467 error = page_cache_read(file, pgoff); 1468 grab_swap_token(); 1469 1470 /* 1471 * The page we want has now been added to the page cache. 1472 * In the unlikely event that someone removed it in the 1473 * meantime, we'll just come back here and read it again. 1474 */ 1475 if (error >= 0) 1476 goto retry_find; 1477 1478 /* 1479 * An error return from page_cache_read can result if the 1480 * system is low on memory, or a problem occurs while trying 1481 * to schedule I/O. 1482 */ 1483 if (error == -ENOMEM) 1484 return NOPAGE_OOM; 1485 return NULL; 1486 1487 page_not_uptodate: 1488 if (!did_readaround) { 1489 majmin = VM_FAULT_MAJOR; 1490 count_vm_event(PGMAJFAULT); 1491 } 1492 lock_page(page); 1493 1494 /* Did it get unhashed while we waited for it? */ 1495 if (!page->mapping) { 1496 unlock_page(page); 1497 page_cache_release(page); 1498 goto retry_all; 1499 } 1500 1501 /* Did somebody else get it up-to-date? */ 1502 if (PageUptodate(page)) { 1503 unlock_page(page); 1504 goto success; 1505 } 1506 1507 error = mapping->a_ops->readpage(file, page); 1508 if (!error) { 1509 wait_on_page_locked(page); 1510 if (PageUptodate(page)) 1511 goto success; 1512 } else if (error == AOP_TRUNCATED_PAGE) { 1513 page_cache_release(page); 1514 goto retry_find; 1515 } 1516 1517 /* 1518 * Umm, take care of errors if the page isn't up-to-date. 1519 * Try to re-read it _once_. We do this synchronously, 1520 * because there really aren't any performance issues here 1521 * and we need to check for errors. 1522 */ 1523 lock_page(page); 1524 1525 /* Somebody truncated the page on us? */ 1526 if (!page->mapping) { 1527 unlock_page(page); 1528 page_cache_release(page); 1529 goto retry_all; 1530 } 1531 1532 /* Somebody else successfully read it in? */ 1533 if (PageUptodate(page)) { 1534 unlock_page(page); 1535 goto success; 1536 } 1537 ClearPageError(page); 1538 error = mapping->a_ops->readpage(file, page); 1539 if (!error) { 1540 wait_on_page_locked(page); 1541 if (PageUptodate(page)) 1542 goto success; 1543 } else if (error == AOP_TRUNCATED_PAGE) { 1544 page_cache_release(page); 1545 goto retry_find; 1546 } 1547 1548 /* 1549 * Things didn't work out. Return zero to tell the 1550 * mm layer so, possibly freeing the page cache page first. 1551 */ 1552 shrink_readahead_size_eio(file, ra); 1553 page_cache_release(page); 1554 return NULL; 1555 } 1556 EXPORT_SYMBOL(filemap_nopage); 1557 1558 static struct page * filemap_getpage(struct file *file, unsigned long pgoff, 1559 int nonblock) 1560 { 1561 struct address_space *mapping = file->f_mapping; 1562 struct page *page; 1563 int error; 1564 1565 /* 1566 * Do we have something in the page cache already? 1567 */ 1568 retry_find: 1569 page = find_get_page(mapping, pgoff); 1570 if (!page) { 1571 if (nonblock) 1572 return NULL; 1573 goto no_cached_page; 1574 } 1575 1576 /* 1577 * Ok, found a page in the page cache, now we need to check 1578 * that it's up-to-date. 1579 */ 1580 if (!PageUptodate(page)) { 1581 if (nonblock) { 1582 page_cache_release(page); 1583 return NULL; 1584 } 1585 goto page_not_uptodate; 1586 } 1587 1588 success: 1589 /* 1590 * Found the page and have a reference on it. 1591 */ 1592 mark_page_accessed(page); 1593 return page; 1594 1595 no_cached_page: 1596 error = page_cache_read(file, pgoff); 1597 1598 /* 1599 * The page we want has now been added to the page cache. 1600 * In the unlikely event that someone removed it in the 1601 * meantime, we'll just come back here and read it again. 1602 */ 1603 if (error >= 0) 1604 goto retry_find; 1605 1606 /* 1607 * An error return from page_cache_read can result if the 1608 * system is low on memory, or a problem occurs while trying 1609 * to schedule I/O. 1610 */ 1611 return NULL; 1612 1613 page_not_uptodate: 1614 lock_page(page); 1615 1616 /* Did it get unhashed while we waited for it? */ 1617 if (!page->mapping) { 1618 unlock_page(page); 1619 goto err; 1620 } 1621 1622 /* Did somebody else get it up-to-date? */ 1623 if (PageUptodate(page)) { 1624 unlock_page(page); 1625 goto success; 1626 } 1627 1628 error = mapping->a_ops->readpage(file, page); 1629 if (!error) { 1630 wait_on_page_locked(page); 1631 if (PageUptodate(page)) 1632 goto success; 1633 } else if (error == AOP_TRUNCATED_PAGE) { 1634 page_cache_release(page); 1635 goto retry_find; 1636 } 1637 1638 /* 1639 * Umm, take care of errors if the page isn't up-to-date. 1640 * Try to re-read it _once_. We do this synchronously, 1641 * because there really aren't any performance issues here 1642 * and we need to check for errors. 1643 */ 1644 lock_page(page); 1645 1646 /* Somebody truncated the page on us? */ 1647 if (!page->mapping) { 1648 unlock_page(page); 1649 goto err; 1650 } 1651 /* Somebody else successfully read it in? */ 1652 if (PageUptodate(page)) { 1653 unlock_page(page); 1654 goto success; 1655 } 1656 1657 ClearPageError(page); 1658 error = mapping->a_ops->readpage(file, page); 1659 if (!error) { 1660 wait_on_page_locked(page); 1661 if (PageUptodate(page)) 1662 goto success; 1663 } else if (error == AOP_TRUNCATED_PAGE) { 1664 page_cache_release(page); 1665 goto retry_find; 1666 } 1667 1668 /* 1669 * Things didn't work out. Return zero to tell the 1670 * mm layer so, possibly freeing the page cache page first. 1671 */ 1672 err: 1673 page_cache_release(page); 1674 1675 return NULL; 1676 } 1677 1678 int filemap_populate(struct vm_area_struct *vma, unsigned long addr, 1679 unsigned long len, pgprot_t prot, unsigned long pgoff, 1680 int nonblock) 1681 { 1682 struct file *file = vma->vm_file; 1683 struct address_space *mapping = file->f_mapping; 1684 struct inode *inode = mapping->host; 1685 unsigned long size; 1686 struct mm_struct *mm = vma->vm_mm; 1687 struct page *page; 1688 int err; 1689 1690 if (!nonblock) 1691 force_page_cache_readahead(mapping, vma->vm_file, 1692 pgoff, len >> PAGE_CACHE_SHIFT); 1693 1694 repeat: 1695 size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1696 if (pgoff + (len >> PAGE_CACHE_SHIFT) > size) 1697 return -EINVAL; 1698 1699 page = filemap_getpage(file, pgoff, nonblock); 1700 1701 /* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as 1702 * done in shmem_populate calling shmem_getpage */ 1703 if (!page && !nonblock) 1704 return -ENOMEM; 1705 1706 if (page) { 1707 err = install_page(mm, vma, addr, page, prot); 1708 if (err) { 1709 page_cache_release(page); 1710 return err; 1711 } 1712 } else if (vma->vm_flags & VM_NONLINEAR) { 1713 /* No page was found just because we can't read it in now (being 1714 * here implies nonblock != 0), but the page may exist, so set 1715 * the PTE to fault it in later. */ 1716 err = install_file_pte(mm, vma, addr, pgoff, prot); 1717 if (err) 1718 return err; 1719 } 1720 1721 len -= PAGE_SIZE; 1722 addr += PAGE_SIZE; 1723 pgoff++; 1724 if (len) 1725 goto repeat; 1726 1727 return 0; 1728 } 1729 EXPORT_SYMBOL(filemap_populate); 1730 1731 struct vm_operations_struct generic_file_vm_ops = { 1732 .nopage = filemap_nopage, 1733 .populate = filemap_populate, 1734 }; 1735 1736 /* This is used for a general mmap of a disk file */ 1737 1738 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 1739 { 1740 struct address_space *mapping = file->f_mapping; 1741 1742 if (!mapping->a_ops->readpage) 1743 return -ENOEXEC; 1744 file_accessed(file); 1745 vma->vm_ops = &generic_file_vm_ops; 1746 return 0; 1747 } 1748 1749 /* 1750 * This is for filesystems which do not implement ->writepage. 1751 */ 1752 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 1753 { 1754 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 1755 return -EINVAL; 1756 return generic_file_mmap(file, vma); 1757 } 1758 #else 1759 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 1760 { 1761 return -ENOSYS; 1762 } 1763 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 1764 { 1765 return -ENOSYS; 1766 } 1767 #endif /* CONFIG_MMU */ 1768 1769 EXPORT_SYMBOL(generic_file_mmap); 1770 EXPORT_SYMBOL(generic_file_readonly_mmap); 1771 1772 static inline struct page *__read_cache_page(struct address_space *mapping, 1773 unsigned long index, 1774 int (*filler)(void *,struct page*), 1775 void *data) 1776 { 1777 struct page *page, *cached_page = NULL; 1778 int err; 1779 repeat: 1780 page = find_get_page(mapping, index); 1781 if (!page) { 1782 if (!cached_page) { 1783 cached_page = page_cache_alloc_cold(mapping); 1784 if (!cached_page) 1785 return ERR_PTR(-ENOMEM); 1786 } 1787 err = add_to_page_cache_lru(cached_page, mapping, 1788 index, GFP_KERNEL); 1789 if (err == -EEXIST) 1790 goto repeat; 1791 if (err < 0) { 1792 /* Presumably ENOMEM for radix tree node */ 1793 page_cache_release(cached_page); 1794 return ERR_PTR(err); 1795 } 1796 page = cached_page; 1797 cached_page = NULL; 1798 err = filler(data, page); 1799 if (err < 0) { 1800 page_cache_release(page); 1801 page = ERR_PTR(err); 1802 } 1803 } 1804 if (cached_page) 1805 page_cache_release(cached_page); 1806 return page; 1807 } 1808 1809 /** 1810 * read_cache_page - read into page cache, fill it if needed 1811 * @mapping: the page's address_space 1812 * @index: the page index 1813 * @filler: function to perform the read 1814 * @data: destination for read data 1815 * 1816 * Read into the page cache. If a page already exists, 1817 * and PageUptodate() is not set, try to fill the page. 1818 */ 1819 struct page *read_cache_page(struct address_space *mapping, 1820 unsigned long index, 1821 int (*filler)(void *,struct page*), 1822 void *data) 1823 { 1824 struct page *page; 1825 int err; 1826 1827 retry: 1828 page = __read_cache_page(mapping, index, filler, data); 1829 if (IS_ERR(page)) 1830 goto out; 1831 mark_page_accessed(page); 1832 if (PageUptodate(page)) 1833 goto out; 1834 1835 lock_page(page); 1836 if (!page->mapping) { 1837 unlock_page(page); 1838 page_cache_release(page); 1839 goto retry; 1840 } 1841 if (PageUptodate(page)) { 1842 unlock_page(page); 1843 goto out; 1844 } 1845 err = filler(data, page); 1846 if (err < 0) { 1847 page_cache_release(page); 1848 page = ERR_PTR(err); 1849 } 1850 out: 1851 return page; 1852 } 1853 EXPORT_SYMBOL(read_cache_page); 1854 1855 /* 1856 * If the page was newly created, increment its refcount and add it to the 1857 * caller's lru-buffering pagevec. This function is specifically for 1858 * generic_file_write(). 1859 */ 1860 static inline struct page * 1861 __grab_cache_page(struct address_space *mapping, unsigned long index, 1862 struct page **cached_page, struct pagevec *lru_pvec) 1863 { 1864 int err; 1865 struct page *page; 1866 repeat: 1867 page = find_lock_page(mapping, index); 1868 if (!page) { 1869 if (!*cached_page) { 1870 *cached_page = page_cache_alloc(mapping); 1871 if (!*cached_page) 1872 return NULL; 1873 } 1874 err = add_to_page_cache(*cached_page, mapping, 1875 index, GFP_KERNEL); 1876 if (err == -EEXIST) 1877 goto repeat; 1878 if (err == 0) { 1879 page = *cached_page; 1880 page_cache_get(page); 1881 if (!pagevec_add(lru_pvec, page)) 1882 __pagevec_lru_add(lru_pvec); 1883 *cached_page = NULL; 1884 } 1885 } 1886 return page; 1887 } 1888 1889 /* 1890 * The logic we want is 1891 * 1892 * if suid or (sgid and xgrp) 1893 * remove privs 1894 */ 1895 int remove_suid(struct dentry *dentry) 1896 { 1897 mode_t mode = dentry->d_inode->i_mode; 1898 int kill = 0; 1899 int result = 0; 1900 1901 /* suid always must be killed */ 1902 if (unlikely(mode & S_ISUID)) 1903 kill = ATTR_KILL_SUID; 1904 1905 /* 1906 * sgid without any exec bits is just a mandatory locking mark; leave 1907 * it alone. If some exec bits are set, it's a real sgid; kill it. 1908 */ 1909 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) 1910 kill |= ATTR_KILL_SGID; 1911 1912 if (unlikely(kill && !capable(CAP_FSETID))) { 1913 struct iattr newattrs; 1914 1915 newattrs.ia_valid = ATTR_FORCE | kill; 1916 result = notify_change(dentry, &newattrs); 1917 } 1918 return result; 1919 } 1920 EXPORT_SYMBOL(remove_suid); 1921 1922 size_t 1923 __filemap_copy_from_user_iovec_inatomic(char *vaddr, 1924 const struct iovec *iov, size_t base, size_t bytes) 1925 { 1926 size_t copied = 0, left = 0; 1927 1928 while (bytes) { 1929 char __user *buf = iov->iov_base + base; 1930 int copy = min(bytes, iov->iov_len - base); 1931 1932 base = 0; 1933 left = __copy_from_user_inatomic_nocache(vaddr, buf, copy); 1934 copied += copy; 1935 bytes -= copy; 1936 vaddr += copy; 1937 iov++; 1938 1939 if (unlikely(left)) 1940 break; 1941 } 1942 return copied - left; 1943 } 1944 1945 /* 1946 * Performs necessary checks before doing a write 1947 * 1948 * Can adjust writing position or amount of bytes to write. 1949 * Returns appropriate error code that caller should return or 1950 * zero in case that write should be allowed. 1951 */ 1952 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) 1953 { 1954 struct inode *inode = file->f_mapping->host; 1955 unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 1956 1957 if (unlikely(*pos < 0)) 1958 return -EINVAL; 1959 1960 if (!isblk) { 1961 /* FIXME: this is for backwards compatibility with 2.4 */ 1962 if (file->f_flags & O_APPEND) 1963 *pos = i_size_read(inode); 1964 1965 if (limit != RLIM_INFINITY) { 1966 if (*pos >= limit) { 1967 send_sig(SIGXFSZ, current, 0); 1968 return -EFBIG; 1969 } 1970 if (*count > limit - (typeof(limit))*pos) { 1971 *count = limit - (typeof(limit))*pos; 1972 } 1973 } 1974 } 1975 1976 /* 1977 * LFS rule 1978 */ 1979 if (unlikely(*pos + *count > MAX_NON_LFS && 1980 !(file->f_flags & O_LARGEFILE))) { 1981 if (*pos >= MAX_NON_LFS) { 1982 send_sig(SIGXFSZ, current, 0); 1983 return -EFBIG; 1984 } 1985 if (*count > MAX_NON_LFS - (unsigned long)*pos) { 1986 *count = MAX_NON_LFS - (unsigned long)*pos; 1987 } 1988 } 1989 1990 /* 1991 * Are we about to exceed the fs block limit ? 1992 * 1993 * If we have written data it becomes a short write. If we have 1994 * exceeded without writing data we send a signal and return EFBIG. 1995 * Linus frestrict idea will clean these up nicely.. 1996 */ 1997 if (likely(!isblk)) { 1998 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { 1999 if (*count || *pos > inode->i_sb->s_maxbytes) { 2000 send_sig(SIGXFSZ, current, 0); 2001 return -EFBIG; 2002 } 2003 /* zero-length writes at ->s_maxbytes are OK */ 2004 } 2005 2006 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) 2007 *count = inode->i_sb->s_maxbytes - *pos; 2008 } else { 2009 loff_t isize; 2010 if (bdev_read_only(I_BDEV(inode))) 2011 return -EPERM; 2012 isize = i_size_read(inode); 2013 if (*pos >= isize) { 2014 if (*count || *pos > isize) 2015 return -ENOSPC; 2016 } 2017 2018 if (*pos + *count > isize) 2019 *count = isize - *pos; 2020 } 2021 return 0; 2022 } 2023 EXPORT_SYMBOL(generic_write_checks); 2024 2025 ssize_t 2026 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, 2027 unsigned long *nr_segs, loff_t pos, loff_t *ppos, 2028 size_t count, size_t ocount) 2029 { 2030 struct file *file = iocb->ki_filp; 2031 struct address_space *mapping = file->f_mapping; 2032 struct inode *inode = mapping->host; 2033 ssize_t written; 2034 2035 if (count != ocount) 2036 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); 2037 2038 written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs); 2039 if (written > 0) { 2040 loff_t end = pos + written; 2041 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 2042 i_size_write(inode, end); 2043 mark_inode_dirty(inode); 2044 } 2045 *ppos = end; 2046 } 2047 2048 /* 2049 * Sync the fs metadata but not the minor inode changes and 2050 * of course not the data as we did direct DMA for the IO. 2051 * i_mutex is held, which protects generic_osync_inode() from 2052 * livelocking. 2053 */ 2054 if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2055 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA); 2056 if (err < 0) 2057 written = err; 2058 } 2059 if (written == count && !is_sync_kiocb(iocb)) 2060 written = -EIOCBQUEUED; 2061 return written; 2062 } 2063 EXPORT_SYMBOL(generic_file_direct_write); 2064 2065 ssize_t 2066 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, 2067 unsigned long nr_segs, loff_t pos, loff_t *ppos, 2068 size_t count, ssize_t written) 2069 { 2070 struct file *file = iocb->ki_filp; 2071 struct address_space * mapping = file->f_mapping; 2072 const struct address_space_operations *a_ops = mapping->a_ops; 2073 struct inode *inode = mapping->host; 2074 long status = 0; 2075 struct page *page; 2076 struct page *cached_page = NULL; 2077 size_t bytes; 2078 struct pagevec lru_pvec; 2079 const struct iovec *cur_iov = iov; /* current iovec */ 2080 size_t iov_base = 0; /* offset in the current iovec */ 2081 char __user *buf; 2082 2083 pagevec_init(&lru_pvec, 0); 2084 2085 /* 2086 * handle partial DIO write. Adjust cur_iov if needed. 2087 */ 2088 if (likely(nr_segs == 1)) 2089 buf = iov->iov_base + written; 2090 else { 2091 filemap_set_next_iovec(&cur_iov, &iov_base, written); 2092 buf = cur_iov->iov_base + iov_base; 2093 } 2094 2095 do { 2096 unsigned long index; 2097 unsigned long offset; 2098 size_t copied; 2099 2100 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ 2101 index = pos >> PAGE_CACHE_SHIFT; 2102 bytes = PAGE_CACHE_SIZE - offset; 2103 2104 /* Limit the size of the copy to the caller's write size */ 2105 bytes = min(bytes, count); 2106 2107 /* 2108 * Limit the size of the copy to that of the current segment, 2109 * because fault_in_pages_readable() doesn't know how to walk 2110 * segments. 2111 */ 2112 bytes = min(bytes, cur_iov->iov_len - iov_base); 2113 2114 /* 2115 * Bring in the user page that we will copy from _first_. 2116 * Otherwise there's a nasty deadlock on copying from the 2117 * same page as we're writing to, without it being marked 2118 * up-to-date. 2119 */ 2120 fault_in_pages_readable(buf, bytes); 2121 2122 page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); 2123 if (!page) { 2124 status = -ENOMEM; 2125 break; 2126 } 2127 2128 if (unlikely(bytes == 0)) { 2129 status = 0; 2130 copied = 0; 2131 goto zero_length_segment; 2132 } 2133 2134 status = a_ops->prepare_write(file, page, offset, offset+bytes); 2135 if (unlikely(status)) { 2136 loff_t isize = i_size_read(inode); 2137 2138 if (status != AOP_TRUNCATED_PAGE) 2139 unlock_page(page); 2140 page_cache_release(page); 2141 if (status == AOP_TRUNCATED_PAGE) 2142 continue; 2143 /* 2144 * prepare_write() may have instantiated a few blocks 2145 * outside i_size. Trim these off again. 2146 */ 2147 if (pos + bytes > isize) 2148 vmtruncate(inode, isize); 2149 break; 2150 } 2151 if (likely(nr_segs == 1)) 2152 copied = filemap_copy_from_user(page, offset, 2153 buf, bytes); 2154 else 2155 copied = filemap_copy_from_user_iovec(page, offset, 2156 cur_iov, iov_base, bytes); 2157 flush_dcache_page(page); 2158 status = a_ops->commit_write(file, page, offset, offset+bytes); 2159 if (status == AOP_TRUNCATED_PAGE) { 2160 page_cache_release(page); 2161 continue; 2162 } 2163 zero_length_segment: 2164 if (likely(copied >= 0)) { 2165 if (!status) 2166 status = copied; 2167 2168 if (status >= 0) { 2169 written += status; 2170 count -= status; 2171 pos += status; 2172 buf += status; 2173 if (unlikely(nr_segs > 1)) { 2174 filemap_set_next_iovec(&cur_iov, 2175 &iov_base, status); 2176 if (count) 2177 buf = cur_iov->iov_base + 2178 iov_base; 2179 } else { 2180 iov_base += status; 2181 } 2182 } 2183 } 2184 if (unlikely(copied != bytes)) 2185 if (status >= 0) 2186 status = -EFAULT; 2187 unlock_page(page); 2188 mark_page_accessed(page); 2189 page_cache_release(page); 2190 if (status < 0) 2191 break; 2192 balance_dirty_pages_ratelimited(mapping); 2193 cond_resched(); 2194 } while (count); 2195 *ppos = pos; 2196 2197 if (cached_page) 2198 page_cache_release(cached_page); 2199 2200 /* 2201 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC 2202 */ 2203 if (likely(status >= 0)) { 2204 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2205 if (!a_ops->writepage || !is_sync_kiocb(iocb)) 2206 status = generic_osync_inode(inode, mapping, 2207 OSYNC_METADATA|OSYNC_DATA); 2208 } 2209 } 2210 2211 /* 2212 * If we get here for O_DIRECT writes then we must have fallen through 2213 * to buffered writes (block instantiation inside i_size). So we sync 2214 * the file data here, to try to honour O_DIRECT expectations. 2215 */ 2216 if (unlikely(file->f_flags & O_DIRECT) && written) 2217 status = filemap_write_and_wait(mapping); 2218 2219 pagevec_lru_add(&lru_pvec); 2220 return written ? written : status; 2221 } 2222 EXPORT_SYMBOL(generic_file_buffered_write); 2223 2224 static ssize_t 2225 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, 2226 unsigned long nr_segs, loff_t *ppos) 2227 { 2228 struct file *file = iocb->ki_filp; 2229 const struct address_space * mapping = file->f_mapping; 2230 size_t ocount; /* original count */ 2231 size_t count; /* after file limit checks */ 2232 struct inode *inode = mapping->host; 2233 unsigned long seg; 2234 loff_t pos; 2235 ssize_t written; 2236 ssize_t err; 2237 2238 ocount = 0; 2239 for (seg = 0; seg < nr_segs; seg++) { 2240 const struct iovec *iv = &iov[seg]; 2241 2242 /* 2243 * If any segment has a negative length, or the cumulative 2244 * length ever wraps negative then return -EINVAL. 2245 */ 2246 ocount += iv->iov_len; 2247 if (unlikely((ssize_t)(ocount|iv->iov_len) < 0)) 2248 return -EINVAL; 2249 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) 2250 continue; 2251 if (seg == 0) 2252 return -EFAULT; 2253 nr_segs = seg; 2254 ocount -= iv->iov_len; /* This segment is no good */ 2255 break; 2256 } 2257 2258 count = ocount; 2259 pos = *ppos; 2260 2261 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 2262 2263 /* We can write back this queue in page reclaim */ 2264 current->backing_dev_info = mapping->backing_dev_info; 2265 written = 0; 2266 2267 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 2268 if (err) 2269 goto out; 2270 2271 if (count == 0) 2272 goto out; 2273 2274 err = remove_suid(file->f_dentry); 2275 if (err) 2276 goto out; 2277 2278 file_update_time(file); 2279 2280 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 2281 if (unlikely(file->f_flags & O_DIRECT)) { 2282 written = generic_file_direct_write(iocb, iov, 2283 &nr_segs, pos, ppos, count, ocount); 2284 if (written < 0 || written == count) 2285 goto out; 2286 /* 2287 * direct-io write to a hole: fall through to buffered I/O 2288 * for completing the rest of the request. 2289 */ 2290 pos += written; 2291 count -= written; 2292 } 2293 2294 written = generic_file_buffered_write(iocb, iov, nr_segs, 2295 pos, ppos, count, written); 2296 out: 2297 current->backing_dev_info = NULL; 2298 return written ? written : err; 2299 } 2300 EXPORT_SYMBOL(generic_file_aio_write_nolock); 2301 2302 ssize_t 2303 generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, 2304 unsigned long nr_segs, loff_t *ppos) 2305 { 2306 struct file *file = iocb->ki_filp; 2307 struct address_space *mapping = file->f_mapping; 2308 struct inode *inode = mapping->host; 2309 ssize_t ret; 2310 loff_t pos = *ppos; 2311 2312 ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos); 2313 2314 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2315 int err; 2316 2317 err = sync_page_range_nolock(inode, mapping, pos, ret); 2318 if (err < 0) 2319 ret = err; 2320 } 2321 return ret; 2322 } 2323 2324 static ssize_t 2325 __generic_file_write_nolock(struct file *file, const struct iovec *iov, 2326 unsigned long nr_segs, loff_t *ppos) 2327 { 2328 struct kiocb kiocb; 2329 ssize_t ret; 2330 2331 init_sync_kiocb(&kiocb, file); 2332 ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); 2333 if (ret == -EIOCBQUEUED) 2334 ret = wait_on_sync_kiocb(&kiocb); 2335 return ret; 2336 } 2337 2338 ssize_t 2339 generic_file_write_nolock(struct file *file, const struct iovec *iov, 2340 unsigned long nr_segs, loff_t *ppos) 2341 { 2342 struct kiocb kiocb; 2343 ssize_t ret; 2344 2345 init_sync_kiocb(&kiocb, file); 2346 ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); 2347 if (-EIOCBQUEUED == ret) 2348 ret = wait_on_sync_kiocb(&kiocb); 2349 return ret; 2350 } 2351 EXPORT_SYMBOL(generic_file_write_nolock); 2352 2353 ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf, 2354 size_t count, loff_t pos) 2355 { 2356 struct file *file = iocb->ki_filp; 2357 struct address_space *mapping = file->f_mapping; 2358 struct inode *inode = mapping->host; 2359 ssize_t ret; 2360 struct iovec local_iov = { .iov_base = (void __user *)buf, 2361 .iov_len = count }; 2362 2363 BUG_ON(iocb->ki_pos != pos); 2364 2365 mutex_lock(&inode->i_mutex); 2366 ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1, 2367 &iocb->ki_pos); 2368 mutex_unlock(&inode->i_mutex); 2369 2370 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2371 ssize_t err; 2372 2373 err = sync_page_range(inode, mapping, pos, ret); 2374 if (err < 0) 2375 ret = err; 2376 } 2377 return ret; 2378 } 2379 EXPORT_SYMBOL(generic_file_aio_write); 2380 2381 ssize_t generic_file_write(struct file *file, const char __user *buf, 2382 size_t count, loff_t *ppos) 2383 { 2384 struct address_space *mapping = file->f_mapping; 2385 struct inode *inode = mapping->host; 2386 ssize_t ret; 2387 struct iovec local_iov = { .iov_base = (void __user *)buf, 2388 .iov_len = count }; 2389 2390 mutex_lock(&inode->i_mutex); 2391 ret = __generic_file_write_nolock(file, &local_iov, 1, ppos); 2392 mutex_unlock(&inode->i_mutex); 2393 2394 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2395 ssize_t err; 2396 2397 err = sync_page_range(inode, mapping, *ppos - ret, ret); 2398 if (err < 0) 2399 ret = err; 2400 } 2401 return ret; 2402 } 2403 EXPORT_SYMBOL(generic_file_write); 2404 2405 ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, 2406 unsigned long nr_segs, loff_t *ppos) 2407 { 2408 struct kiocb kiocb; 2409 ssize_t ret; 2410 2411 init_sync_kiocb(&kiocb, filp); 2412 ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos); 2413 if (-EIOCBQUEUED == ret) 2414 ret = wait_on_sync_kiocb(&kiocb); 2415 return ret; 2416 } 2417 EXPORT_SYMBOL(generic_file_readv); 2418 2419 ssize_t generic_file_writev(struct file *file, const struct iovec *iov, 2420 unsigned long nr_segs, loff_t *ppos) 2421 { 2422 struct address_space *mapping = file->f_mapping; 2423 struct inode *inode = mapping->host; 2424 ssize_t ret; 2425 2426 mutex_lock(&inode->i_mutex); 2427 ret = __generic_file_write_nolock(file, iov, nr_segs, ppos); 2428 mutex_unlock(&inode->i_mutex); 2429 2430 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { 2431 int err; 2432 2433 err = sync_page_range(inode, mapping, *ppos - ret, ret); 2434 if (err < 0) 2435 ret = err; 2436 } 2437 return ret; 2438 } 2439 EXPORT_SYMBOL(generic_file_writev); 2440 2441 /* 2442 * Called under i_mutex for writes to S_ISREG files. Returns -EIO if something 2443 * went wrong during pagecache shootdown. 2444 */ 2445 static ssize_t 2446 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 2447 loff_t offset, unsigned long nr_segs) 2448 { 2449 struct file *file = iocb->ki_filp; 2450 struct address_space *mapping = file->f_mapping; 2451 ssize_t retval; 2452 size_t write_len = 0; 2453 2454 /* 2455 * If it's a write, unmap all mmappings of the file up-front. This 2456 * will cause any pte dirty bits to be propagated into the pageframes 2457 * for the subsequent filemap_write_and_wait(). 2458 */ 2459 if (rw == WRITE) { 2460 write_len = iov_length(iov, nr_segs); 2461 if (mapping_mapped(mapping)) 2462 unmap_mapping_range(mapping, offset, write_len, 0); 2463 } 2464 2465 retval = filemap_write_and_wait(mapping); 2466 if (retval == 0) { 2467 retval = mapping->a_ops->direct_IO(rw, iocb, iov, 2468 offset, nr_segs); 2469 if (rw == WRITE && mapping->nrpages) { 2470 pgoff_t end = (offset + write_len - 1) 2471 >> PAGE_CACHE_SHIFT; 2472 int err = invalidate_inode_pages2_range(mapping, 2473 offset >> PAGE_CACHE_SHIFT, end); 2474 if (err) 2475 retval = err; 2476 } 2477 } 2478 return retval; 2479 } 2480