/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include "filemap.h"
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs);

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock		(vmtruncate)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->mmap_sem
 *    ->i_mutex			(msync)
 *
 *  ->i_mutex
 *    ->i_alloc_sem		(various)
 *
 *  ->inode_lock
 *    ->sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock		(proc_pid_lookup)
 */

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	pagecache_acct(-1);
}

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
}

static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((unsigned long *)word, struct page, flags);

	/*
	 * page_mapping() is being called without PG_locked held.
	 * Some knowledge of the state and use of the page is used to
	 * reduce the requirements down to a memory barrier.
	 * The danger here is of a stale page_mapping() return value
	 * indicating a struct address_space different from the one it's
	 * associated with when it is associated with one.
	 * After smp_mb(), it's either the correct page_mapping() for
	 * the page, or an old page_mapping() and the page's own
	 * page_mapping() has gone NULL.
	 * The ->sync_page() address_space operation must tolerate
	 * page_mapping() going NULL. By an amazing coincidence,
	 * this comes about because none of the users of the page
	 * in the ->sync_page() methods make essential use of the
	 * page_mapping(), merely passing the page down to the backing
	 * device's unplug functions when it's non-NULL, which in turn
	 * ignore it for all cases but swap, where only page_private(page) is
	 * of interest. When page_mapping() does go NULL, the entire
	 * call stack gracefully ignores the page and returns.
	 * -- wli
	 */
	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);
	io_schedule();
	return 0;
}

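/*
 * Illustrative sketch (editorial note, not part of the original file): a
 * truncate-style caller of the remove_from_page_cache() helpers above is
 * expected to hold the page lock and its own reference, roughly:
 *
 *	lock_page(page);
 *	remove_from_page_cache(page);	-- takes tree_lock internally
 *	page_cache_release(page);	-- drop the pagecache's reference
 *	unlock_page(page);
 *	page_cache_release(page);	-- drop the caller's own reference
 *
 * cf. what truncate_inode_pages() does via truncate_complete_page() in
 * mm/truncate.c.
 */
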
/**
 * filemap_fdatawrite_range - start writeback against all of a mapping's
 * dirty pages that lie within the byte offsets <start, end>
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.start = start,
		.end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/*
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/*
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/*
 * Write and wait upon all the pages in the passed range.  This is a "data
 * integrity" operation.  It waits upon in-flight writeout before starting and
 * waiting upon new writeout.  If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0) {
		mutex_lock(&inode->i_mutex);
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		mutex_unlock(&inode->i_mutex);
	}
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range);

/*
 * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			   loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - walk the list of under-writeback pages of the given
 *     address space and wait for all of them.
 *
 * @mapping: address space structure to wait for
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/*
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
			if (!err)
				err = err2;
		}
	}
	return err;
}

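/*
 * Illustrative sketch (editorial note, not part of the original file): a
 * simple fsync-style path can flush and wait on a whole mapping with
 *
 *	err = filemap_write_and_wait(file->f_mapping);
 *
 * or restrict itself to a byte range, end-inclusive, with
 *
 *	err = filemap_write_and_wait_range(file->f_mapping, pos,
 *					   pos + count - 1);
 *
 * Both return 0 or the first -EIO / -ENOSPC style error recorded against the
 * mapping, as collected by wait_on_page_writeback_range() above.
 */
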
/*
 * This function is used to add newly allocated pagecache pages:
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			page->mapping = mapping;
			page->index = offset;
			mapping->nrpages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

EXPORT_SYMBOL(add_to_page_cache);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add(page);
	return ret;
}

#ifdef CONFIG_NUMA
struct page *page_cache_alloc(struct address_space *x)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
	}
	return alloc_pages(mapping_gfp_mask(x), 0);
}
EXPORT_SYMBOL(page_cache_alloc);

struct page *page_cache_alloc_cold(struct address_space *x)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
	}
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
EXPORT_SYMBOL(page_cache_alloc_cold);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

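/*
 * Illustrative sketch (editorial note): the PG_locked / PG_writeback waiters
 * in <linux/pagemap.h> are thin wrappers around wait_on_page_bit(); e.g.
 * wait_on_page_locked() is essentially
 *
 *	if (PageLocked(page))
 *		wait_on_page_bit(page, PG_locked);
 *
 * so every sleeper ends up on the hashed per-zone waitqueue returned by
 * page_waitqueue() above.
 */
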
/**
 * unlock_page() - unlock a locked page
 *
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/*
 * End writeback against a page.
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * Get a lock on the page, assuming we need to sleep to get it.
 *
 * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_get_page);

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page && TestSetPageLocked(page))
		page = NULL;
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_trylock_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 *
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
				unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		page_cache_get(page);
		if (TestSetPageLocked(page)) {
			read_unlock_irq(&mapping->tree_lock);
			__lock_page(page);
			read_lock_irq(&mapping->tree_lock);

			/* Has the page been truncated while we slept? */
			if (unlikely(page->mapping != mapping ||
				     page->index != offset)) {
				unlock_page(page);
				page_cache_release(page);
				goto repeat;
			}
		}
	}
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_lock_page);

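/*
 * Illustrative sketch (editorial note): callers that only need a stable
 * reference use find_get_page() and drop it with page_cache_release();
 * callers that need to examine or modify page state lock it instead:
 *
 *	page = find_lock_page(mapping, index);
 *	if (page) {
 *		... page is locked, pinned, and cannot be truncated here ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */
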
/**
 * find_or_create_page - locate or add a pagecache page
 *
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		unsigned long index, gfp_t gfp_mask)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = alloc_page(gfp_mask);
			if (!cached_page)
				return NULL;
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, gfp_mask);
		if (!err) {
			page = cached_page;
			cached_page = NULL;
		} else if (err == -EEXIST)
			goto repeat;
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}

EXPORT_SYMBOL(find_or_create_page);

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, start, nr_pages);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, index, nr_pages);
	for (i = 0; i < ret; i++) {
		if (pages[i]->mapping == NULL || pages[i]->index != index)
			break;

		page_cache_get(pages[i]);
		index++;
	}
	read_unlock_irq(&mapping->tree_lock);
	return i;
}

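/*
 * Illustrative sketch (editorial note): the gang lookups are normally driven
 * in a loop, advancing the start index past whatever was returned, e.g.
 *
 *	pgoff_t index = 0;
 *	struct page *pages[16];
 *	unsigned nr;
 *
 *	while ((nr = find_get_pages(mapping, index, 16, pages))) {
 *		unsigned i;
 *
 *		index = pages[nr - 1]->index + 1;
 *		for (i = 0; i < nr; i++) {
 *			... use pages[i] ...
 *			page_cache_release(pages[i]);
 *		}
 *	}
 *
 * pagevec_lookup() in mm/swap.c wraps essentially this pattern.
 */
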
/*
 * Like find_get_pages, except we only return pages which are tagged with
 * `tag'.   We update *index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
				(void **)pages, *index, nr_pages, tag);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
	struct page *page = find_get_page(mapping, index);
	gfp_t gfp_mask;

	if (page) {
		if (!TestSetPageLocked(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
	page = alloc_pages(gfp_mask, 0);
	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}

EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.
 * It may be NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
			     struct file_ra_state *_ra,
			     struct file *filp,
			     loff_t *ppos,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index;
	unsigned long end_index;
	unsigned long offset;
	unsigned long last_index;
	unsigned long next_index;
	unsigned long prev_index;
	loff_t isize;
	struct page *cached_page;
	int error;
	struct file_ra_state ra = *_ra;

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;
	next_index = index;
	prev_index = ra.prev_page;
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		cond_resched();
		if (index == next_index)
			next_index = page_cache_readahead(mapping, &ra, filp,
					index, last_index - index);

find_page:
		page = find_get_page(mapping, index);
		if (unlikely(page == NULL)) {
			handle_ra_miss(mapping, &ra, index);
			goto no_cached_page;
		}
		if (!PageUptodate(page))
			goto page_not_up_to_date;
page_ok:

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When (part of) the same page is read multiple times
		 * in succession, only mark it as accessed the first time.
		 */
		if (prev_index != index)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		lock_page(page);

		/* Did it get unhashed before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			lock_page(page);
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_inode_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		/*
		 * i_size must be checked after we have done ->readpage.
		 *
		 * Checking i_size after the readpage allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;
		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page) {
				desc->error = -ENOMEM;
				goto out;
			}
		}
		error = add_to_page_cache_lru(cached_page, mapping,
						index, GFP_KERNEL);
		if (error) {
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		page = cached_page;
		cached_page = NULL;
		goto readpage;
	}

out:
	*_ra = ra;

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (cached_page)
		page_cache_release(cached_page);
	if (filp)
		file_accessed(filp);
}

EXPORT_SYMBOL(do_generic_mapping_read);

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

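/*
 * Illustrative sketch (editorial note): do_generic_mapping_read() is driven
 * by a read_descriptor_t plus an actor; the plain read(2) path (via the
 * do_generic_file_read() helper, in this era defined in <linux/fs.h>) boils
 * down to roughly
 *
 *	read_descriptor_t desc = {
 *		.written = 0,
 *		.arg.buf = user_buf,
 *		.count   = len,
 *		.error   = 0,
 *	};
 *
 *	do_generic_mapping_read(filp->f_mapping, &filp->f_ra, filp,
 *				ppos, &desc, file_read_actor);
 *
 * with desc.written / desc.error holding the result.
 */
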
/*
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t pos = *ppos, size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
			if (retval > 0 && !is_sync_kiocb(iocb))
				retval = -EIOCBQUEUED;
			if (retval > 0)
				*ppos = pos + retval;
		}
		file_accessed(filp);
		goto out;
	}

	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp,ppos,&desc,file_read_actor);
			retval += desc.written;
			if (desc.error) {
				retval = retval ?: desc.error;
				break;
			}
		}
	}
out:
	return retval;
}

EXPORT_SYMBOL(__generic_file_aio_read);

ssize_t
generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };

	BUG_ON(iocb->ki_pos != pos);
	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}

EXPORT_SYMBOL(generic_file_aio_read);

ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

EXPORT_SYMBOL(generic_file_read);

int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	ssize_t written;
	unsigned long count = desc->count;
	struct file *file = desc->arg.data;

	if (size > count)
		size = count;

	written = file->f_op->sendpage(file, page, offset,
				       size, &file->f_pos, size<count);
	if (written < 0) {
		desc->error = written;
		written = 0;
	}
	desc->count = count - written;
	desc->written += written;
	return written;
}

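/*
 * Illustrative sketch (editorial note): sendfile(2) reuses the same
 * descriptor / actor machinery, with the destination file carried in
 * desc.arg.data; in this era fs/read_write.c invokes it as roughly
 *
 *	ret = in_file->f_op->sendfile(in_file, ppos, count,
 *				      file_send_actor, out_file);
 *
 * which for most filesystems lands in generic_file_sendfile() below.
 */
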
ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_generic_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

EXPORT_SYMBOL(generic_file_sendfile);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     unsigned long index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index,
					max_sane_readahead(nr));
	return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			unsigned long start = offset >> PAGE_CACHE_SHIFT;
			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}

#ifdef CONFIG_MMU
/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
struct page *filemap_nopage(struct vm_area_struct *area,
				unsigned long address, int *type)
{
	int error;
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff;
	int did_readaround = 0, majmin = VM_FAULT_MINOR;

	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		goto outside_data_content;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(area))
		goto no_cached_page;

	/*
	 * The readahead code wants to be told about each and every page
	 * so it can build and shrink its windows appropriately
	 *
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(area))
		page_cache_readahead(mapping, ra, file, pgoff, 1);

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		unsigned long ra_pages;

		if (VM_SequentialReadHint(area)) {
			handle_ra_miss(mapping, ra, pgoff);
			goto no_cached_page;
		}
		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			majmin = VM_FAULT_MAJOR;
			inc_page_state(pgmajfault);
		}
		did_readaround = 1;
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (pgoff > ra_pages / 2)
				start = pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_get_page(mapping, pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_hit++;

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page))
		goto page_not_uptodate;

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	if (type)
		*type = majmin;
	return page;

outside_data_content:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	if (area->vm_mm == current->mm)
		return NULL;
	/* Fall through to the non-read-ahead case */
no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, pgoff);
	grab_swap_token();

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return NOPAGE_OOM;
	return NULL;

page_not_uptodate:
	if (!did_readaround) {
		majmin = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
	}
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
	page_cache_release(page);
	return NULL;
}

EXPORT_SYMBOL(filemap_nopage);

static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
					int nonblock)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int error;

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		if (nonblock)
			return NULL;
		goto no_cached_page;
	}

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page)) {
		if (nonblock) {
			page_cache_release(page);
			return NULL;
		}
		goto page_not_uptodate;
	}

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	return page;

no_cached_page:
	error = page_cache_read(file, pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	return NULL;

page_not_uptodate:
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}
	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
err:
	page_cache_release(page);

	return NULL;
}

int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
		unsigned long len, pgprot_t prot, unsigned long pgoff,
		int nonblock)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long size;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int err;

	if (!nonblock)
		force_page_cache_readahead(mapping, vma->vm_file,
					pgoff, len >> PAGE_CACHE_SHIFT);

repeat:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
		return -EINVAL;

	page = filemap_getpage(file, pgoff, nonblock);

	/* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
	 * done in shmem_populate calling shmem_getpage */
	if (!page && !nonblock)
		return -ENOMEM;

	if (page) {
		err = install_page(mm, vma, addr, page, prot);
		if (err) {
			page_cache_release(page);
			return err;
		}
	} else if (vma->vm_flags & VM_NONLINEAR) {
		/* No page was found just because we can't read it in now (being
		 * here implies nonblock != 0), but the page may exist, so set
		 * the PTE to fault it in later. */
		err = install_file_pte(mm, vma, addr, pgoff, prot);
		if (err)
			return err;
	}

	len -= PAGE_SIZE;
	addr += PAGE_SIZE;
	pgoff++;
	if (len)
		goto repeat;

	return 0;
}
EXPORT_SYMBOL(filemap_populate);

struct vm_operations_struct generic_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

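/*
 * Illustrative sketch (editorial note): a filesystem opts into this generic
 * mmap path simply by pointing its file_operations at it, e.g. with a
 * hypothetical ops table
 *
 *	struct file_operations example_file_ops = {
 *		.read	= generic_file_read,
 *		.mmap	= generic_file_mmap,
 *	};
 *
 * after which faults on the mapping arrive via filemap_nopage() above.
 */
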
/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static inline struct page *__read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page)
				return ERR_PTR(-ENOMEM);
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err < 0) {
			/* Presumably ENOMEM for radix tree node */
			page_cache_release(cached_page);
			return ERR_PTR(err);
		}
		page = cached_page;
		cached_page = NULL;
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}

/*
 * Read into the page cache. If a page already exists,
 * and PageUptodate() is not set, try to fill the page.
 */
struct page *read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		goto out;
	mark_page_accessed(page);
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		page = ERR_PTR(err);
	}
out:
	return page;
}

EXPORT_SYMBOL(read_cache_page);

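/*
 * Illustrative sketch (editorial note): a typical read_cache_page() caller
 * passes the mapping's own ->readpage as the filler and then waits for the
 * I/O it may have started, e.g.
 *
 *	page = read_cache_page(mapping, n,
 *			(filler_t *)mapping->a_ops->readpage, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	wait_on_page_locked(page);
 *	if (!PageUptodate(page)) {
 *		page_cache_release(page);
 *		return -EIO;
 *	}
 *
 * a pattern used by several filesystems for directory and symlink pages
 * (the "file" data argument is simply whatever the filler expects).
 */
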
/*
 * If the page was newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec.  This function is specifically for
 * generic_file_write().
 */
static inline struct page *
__grab_cache_page(struct address_space *mapping, unsigned long index,
			struct page **cached_page, struct pagevec *lru_pvec)
{
	int err;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!*cached_page) {
			*cached_page = page_cache_alloc(mapping);
			if (!*cached_page)
				return NULL;
		}
		err = add_to_page_cache(*cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err == 0) {
			page = *cached_page;
			page_cache_get(page);
			if (!pagevec_add(lru_pvec, page))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
	}
	return page;
}

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;
	int result = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID))) {
		struct iattr newattrs;

		newattrs.ia_valid = ATTR_FORCE | kill;
		result = notify_change(dentry, &newattrs);
	}
	return result;
}
EXPORT_SYMBOL(remove_suid);

size_t
__filemap_copy_from_user_iovec(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left)) {
			/* zero the rest of the target like __copy_from_user */
			if (bytes)
				memset(vaddr, 0, bytes);
			break;
		}
	}
	return copied - left;
}

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);

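/*
 * Illustrative sketch (editorial note): generic_write_checks() adjusts *pos
 * and *count in place, so a write path uses it as a gate before doing any
 * real work:
 *
 *	loff_t pos = *ppos;
 *	size_t count = len;
 *	int err;
 *
 *	err = generic_write_checks(file, &pos, &count,
 *				   S_ISBLK(inode->i_mode));
 *	if (err)
 *		return err;
 *	if (count == 0)
 *		return 0;
 *	... proceed with a possibly shortened write of count bytes at pos ...
 *
 * which is how __generic_file_aio_write_nolock() below uses it.
 */
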
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
	if (written > 0) {
		loff_t end = pos + written;
		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, end);
			mark_inode_dirty(inode);
		}
		*ppos = end;
	}

	/*
	 * Sync the fs metadata but not the minor inode changes and
	 * of course not the data as we did direct DMA for the IO.
	 * i_mutex is held, which protects generic_osync_inode() from
	 * livelocking.
	 */
	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		if (err < 0)
			written = err;
	}
	if (written == count && !is_sync_kiocb(iocb))
		written = -EIOCBQUEUED;
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct address_space_operations *a_ops = mapping->a_ops;
	struct inode	*inode = mapping->host;
	long		status = 0;
	struct page	*page;
	struct page	*cached_page = NULL;
	size_t		bytes;
	struct pagevec	lru_pvec;
	const struct iovec *cur_iov = iov; /* current iovec */
	size_t		iov_base = 0;	   /* offset in the current iovec */
	char __user	*buf;

	pagevec_init(&lru_pvec, 0);

	/*
	 * handle partial DIO write.  Adjust cur_iov if needed.
	 */
	if (likely(nr_segs == 1))
		buf = iov->iov_base + written;
	else {
		filemap_set_next_iovec(&cur_iov, &iov_base, written);
		buf = cur_iov->iov_base + iov_base;
	}

	do {
		unsigned long index;
		unsigned long offset;
		unsigned long maxlen;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		maxlen = cur_iov->iov_len - iov_base;
		if (maxlen > bytes)
			maxlen = bytes;
		fault_in_pages_readable(buf, maxlen);

		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
		if (!page) {
			status = -ENOMEM;
			break;
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status)) {
			loff_t isize = i_size_read(inode);

			if (status != AOP_TRUNCATED_PAGE)
				unlock_page(page);
			page_cache_release(page);
			if (status == AOP_TRUNCATED_PAGE)
				continue;
			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again.
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	struct page *cached_page = NULL;
	size_t bytes;
	struct pagevec lru_pvec;
	const struct iovec *cur_iov = iov;	/* current iovec */
	size_t iov_base = 0;			/* offset in the current iovec */
	char __user *buf;

	pagevec_init(&lru_pvec, 0);

	/*
	 * handle partial DIO write.  Adjust cur_iov if needed.
	 */
	if (likely(nr_segs == 1))
		buf = iov->iov_base + written;
	else {
		filemap_set_next_iovec(&cur_iov, &iov_base, written);
		buf = cur_iov->iov_base + iov_base;
	}

	do {
		unsigned long index;
		unsigned long offset;
		unsigned long maxlen;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		maxlen = cur_iov->iov_len - iov_base;
		if (maxlen > bytes)
			maxlen = bytes;
		fault_in_pages_readable(buf, maxlen);

		page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
		if (!page) {
			status = -ENOMEM;
			break;
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status)) {
			loff_t isize = i_size_read(inode);

			if (status != AOP_TRUNCATED_PAGE)
				unlock_page(page);
			page_cache_release(page);
			if (status == AOP_TRUNCATED_PAGE)
				continue;
			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again.
			 */
			if (pos + bytes > isize)
				vmtruncate(inode, isize);
			break;
		}
		if (likely(nr_segs == 1))
			copied = filemap_copy_from_user(page, offset,
							buf, bytes);
		else
			copied = filemap_copy_from_user_iovec(page, offset,
						cur_iov, iov_base, bytes);
		flush_dcache_page(page);
		status = a_ops->commit_write(file, page, offset, offset+bytes);
		if (status == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			continue;
		}
		if (likely(copied > 0)) {
			if (!status)
				status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
				if (unlikely(nr_segs > 1)) {
					filemap_set_next_iovec(&cur_iov,
							&iov_base, status);
					if (count)
						buf = cur_iov->iov_base +
							iov_base;
				} else {
					iov_base += status;
				}
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		if (status < 0)
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
	*ppos = pos;

	if (cached_page)
		page_cache_release(cached_page);

	/*
	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
	 */
	if (likely(status >= 0)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
			if (!a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(inode, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}

	/*
	 * If we get here for O_DIRECT writes then we must have fallen through
	 * to buffered writes (block instantiation inside i_size).  So we sync
	 * the file data here, to try to honour O_DIRECT expectations.
	 */
	if (unlikely(file->f_flags & O_DIRECT) && written)
		status = filemap_write_and_wait(mapping);

	pagevec_lru_add(&lru_pvec);
	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
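
/*
 * A minimal sketch of the ->prepare_write()/->commit_write() pair that
 * generic_file_buffered_write() drives above.  The foofs_* names are
 * hypothetical and foofs_get_block() is the same assumed block-mapping
 * helper as in the direct-I/O sketch earlier; block-based filesystems of
 * this era typically delegate to block_prepare_write() and
 * generic_commit_write() like this.
 */
static int foofs_prepare_write(struct file *file, struct page *page,
			       unsigned from, unsigned to)
{
	/* Map/allocate blocks and read in any partially covered edges. */
	return block_prepare_write(page, from, to, foofs_get_block);
}

static int foofs_commit_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	/* Mark the copied range dirty and update i_size if the file grew. */
	return generic_commit_write(file, page, from, to);
}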
static ssize_t
__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	unsigned long seg;
	loff_t pos;
	ssize_t written;
	ssize_t err;

	ocount = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative, return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	file_update_time(file);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		written = generic_file_direct_write(iocb, iov,
				&nr_segs, pos, ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
	}

	written = generic_file_buffered_write(iocb, iov, nr_segs,
			pos, ppos, count, written);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}

ssize_t
generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	loff_t pos = *ppos;

	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range_nolock(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write_nolock);

static ssize_t
__generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

ssize_t
generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_nolock);
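
/*
 * A sketch of why the *_nolock variants exist; foofs_file_writev and
 * foofs_reserve_space are hypothetical.  A filesystem that must do its
 * own work under i_mutex takes the mutex itself and then calls
 * generic_file_write_nolock(), rather than generic_file_writev() below,
 * which would try to take i_mutex a second time.  O_SYNC semantics are
 * still honoured via sync_page_range_nolock() inside the nolock path.
 */
int foofs_reserve_space(struct inode *inode, size_t bytes);

static ssize_t foofs_file_writev(struct file *file, const struct iovec *iov,
				 unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = foofs_reserve_space(inode, iov_length(iov, nr_segs));
	if (!ret)
		ret = generic_file_write_nolock(file, iov, nr_segs, ppos);
	mutex_unlock(&inode->i_mutex);
	return ret;
}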
ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
			       size_t count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
						&iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);

ssize_t generic_file_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write);

ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_readv);

ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_writev);

/*
 * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if something
 * went wrong during pagecache shootdown.
 */
static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t retval;
	size_t write_len = 0;

	/*
	 * If it's a write, unmap all mappings of the file up-front.  This
	 * will cause any pte dirty bits to be propagated into the pageframes
	 * for the subsequent filemap_write_and_wait().
	 */
	if (rw == WRITE) {
		write_len = iov_length(iov, nr_segs);
		if (mapping_mapped(mapping))
			unmap_mapping_range(mapping, offset, write_len, 0);
	}

	retval = filemap_write_and_wait(mapping);
	if (retval == 0) {
		retval = mapping->a_ops->direct_IO(rw, iocb, iov,
						offset, nr_segs);
		if (rw == WRITE && mapping->nrpages) {
			pgoff_t end = (offset + write_len - 1)
						>> PAGE_CACHE_SHIFT;
			int err = invalidate_inode_pages2_range(mapping,
					offset >> PAGE_CACHE_SHIFT, end);
			if (err)
				retval = err;
		}
	}
	return retval;
}
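
/*
 * A sketch of how a filesystem typically wires these generics into its
 * struct file_operations; foofs_file_operations is a hypothetical name,
 * the methods themselves are the exported helpers defined in this file
 * (plus generic_file_llseek from fs/read_write.c).
 */
static struct file_operations foofs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= generic_file_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.readv		= generic_file_readv,
	.writev		= generic_file_writev,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
};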