/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleared before returning page into buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping);
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:	address space structure to wait for
 * @start_byte:	offset in bytes where the range starts
 * @end_byte:	offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may have
		 * been written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case: it may indicate the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
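
/*
 * Example (illustrative sketch only, not a caller in this file): the
 * write-and-wait pattern above is what a simple ->fsync() implementation
 * typically reduces to.  The "example_*" names below are hypothetical:
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err;
 *
 *		err = filemap_write_and_wait_range(inode->i_mapping,
 *						   start, end);
 *		if (err)
 *			return err;
 *
 *		return example_sync_metadata(inode, datasync);
 *	}
 */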

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * a memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old, NULL);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_migrate(old, new, true);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__set_page_locked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__clear_page_locked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				      int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page:	Page defining the wait queue of interest
 * @waiter:	Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
			   TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
				  bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);
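
/*
 * Example (illustrative sketch only): a hypothetical helper that uses the
 * hole search above to ask whether @count pages starting at @index are all
 * present.  Because the lookup is lockless, the answer is only a snapshot
 * and may already be stale by the time the caller acts on it:
 *
 *	static bool example_range_populated(struct address_space *mapping,
 *					    pgoff_t index, unsigned long count)
 *	{
 *		return page_cache_next_hole(mapping, index, count) >=
 *			index + count;
 *	}
 */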

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: if the page is not present, a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU list.
 *   The page is returned locked and with an increased refcount.
 *   Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				page_cache_release(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
				gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
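
/*
 * Example (illustrative sketch only): a caller that wants the page at
 * @index locked, creating it if it is not present - roughly the pattern
 * grab_cache_page_write_begin() uses for ->write_begin.  The "example_"
 * name is hypothetical:
 *
 *	static struct page *example_grab_page(struct address_space *mapping,
 *					      pgoff_t index)
 *	{
 *		return pagecache_get_page(mapping, index,
 *					  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *					  mapping_gfp_mask(mapping));
 *	}
 *
 * On success the page comes back locked with an elevated refcount; the
 * caller must unlock_page() and page_cache_release() it when done.
 */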

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* A hole - there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * We must check mapping and index after taking the ref.
		 * Otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block (assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
				      struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
1529 * 1530 * Checking i_size after the check allows us to calculate 1531 * the correct value for "nr", which means the zero-filled 1532 * part of the page is not copied back to userspace (unless 1533 * another truncate extends the file - this is desired though). 1534 */ 1535 1536 isize = i_size_read(inode); 1537 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1538 if (unlikely(!isize || index > end_index)) { 1539 page_cache_release(page); 1540 goto out; 1541 } 1542 1543 /* nr is the maximum number of bytes to copy from this page */ 1544 nr = PAGE_CACHE_SIZE; 1545 if (index == end_index) { 1546 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1547 if (nr <= offset) { 1548 page_cache_release(page); 1549 goto out; 1550 } 1551 } 1552 nr = nr - offset; 1553 1554 /* If users can be writing to this page using arbitrary 1555 * virtual addresses, take care about potential aliasing 1556 * before reading the page on the kernel side. 1557 */ 1558 if (mapping_writably_mapped(mapping)) 1559 flush_dcache_page(page); 1560 1561 /* 1562 * When a sequential read accesses a page several times, 1563 * only mark it as accessed the first time. 1564 */ 1565 if (prev_index != index || offset != prev_offset) 1566 mark_page_accessed(page); 1567 prev_index = index; 1568 1569 /* 1570 * Ok, we have the page, and it's up-to-date, so 1571 * now we can copy it to user space... 1572 */ 1573 1574 ret = copy_page_to_iter(page, offset, nr, iter); 1575 offset += ret; 1576 index += offset >> PAGE_CACHE_SHIFT; 1577 offset &= ~PAGE_CACHE_MASK; 1578 prev_offset = offset; 1579 1580 page_cache_release(page); 1581 written += ret; 1582 if (!iov_iter_count(iter)) 1583 goto out; 1584 if (ret < nr) { 1585 error = -EFAULT; 1586 goto out; 1587 } 1588 continue; 1589 1590 page_not_up_to_date: 1591 /* Get exclusive access to the page ... */ 1592 error = lock_page_killable(page); 1593 if (unlikely(error)) 1594 goto readpage_error; 1595 1596 page_not_up_to_date_locked: 1597 /* Did it get truncated before we got the lock? */ 1598 if (!page->mapping) { 1599 unlock_page(page); 1600 page_cache_release(page); 1601 continue; 1602 } 1603 1604 /* Did somebody else fill it already? */ 1605 if (PageUptodate(page)) { 1606 unlock_page(page); 1607 goto page_ok; 1608 } 1609 1610 readpage: 1611 /* 1612 * A previous I/O error may have been due to temporary 1613 * failures, eg. multipath errors. 1614 * PG_error will be set again if readpage fails. 1615 */ 1616 ClearPageError(page); 1617 /* Start the actual read. The read will unlock the page. */ 1618 error = mapping->a_ops->readpage(filp, page); 1619 1620 if (unlikely(error)) { 1621 if (error == AOP_TRUNCATED_PAGE) { 1622 page_cache_release(page); 1623 error = 0; 1624 goto find_page; 1625 } 1626 goto readpage_error; 1627 } 1628 1629 if (!PageUptodate(page)) { 1630 error = lock_page_killable(page); 1631 if (unlikely(error)) 1632 goto readpage_error; 1633 if (!PageUptodate(page)) { 1634 if (page->mapping == NULL) { 1635 /* 1636 * invalidate_mapping_pages got it 1637 */ 1638 unlock_page(page); 1639 page_cache_release(page); 1640 goto find_page; 1641 } 1642 unlock_page(page); 1643 shrink_readahead_size_eio(filp, ra); 1644 error = -EIO; 1645 goto readpage_error; 1646 } 1647 unlock_page(page); 1648 } 1649 1650 goto page_ok; 1651 1652 readpage_error: 1653 /* UHHUH! A synchronous read error occurred. Report it */ 1654 page_cache_release(page); 1655 goto out; 1656 1657 no_cached_page: 1658 /* 1659 * Ok, it wasn't cached, so we need to create a new 1660 * page.. 
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;
	loff_t pos = *ppos;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		size_t count = iov_iter_count(iter);
		loff_t size;

		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
		if (!retval) {
			struct iov_iter data = *iter;
			retval = mapping->a_ops->direct_IO(iocb, &data, pos);
		}

		if (retval > 0) {
			*ppos = pos + retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
		    IS_DAX(inode)) {
			file_accessed(file);
			goto out;
		}
	}

	retval = do_generic_file_read(file, ppos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
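
/*
 * Example (illustrative sketch only): filesystems that go through the page
 * cache normally just plug the helper above into their file_operations.
 * A hypothetical table (the "example_" name is not a real symbol):
 *
 *	const struct file_operations example_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */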

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	loff_t size;
	int ret = 0;

	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (offset >= size >> PAGE_CACHE_SHIFT)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
*/ 1991 shrink_readahead_size_eio(file, ra); 1992 return VM_FAULT_SIGBUS; 1993 } 1994 EXPORT_SYMBOL(filemap_fault); 1995 1996 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) 1997 { 1998 struct radix_tree_iter iter; 1999 void **slot; 2000 struct file *file = vma->vm_file; 2001 struct address_space *mapping = file->f_mapping; 2002 loff_t size; 2003 struct page *page; 2004 unsigned long address = (unsigned long) vmf->virtual_address; 2005 unsigned long addr; 2006 pte_t *pte; 2007 2008 rcu_read_lock(); 2009 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) { 2010 if (iter.index > vmf->max_pgoff) 2011 break; 2012 repeat: 2013 page = radix_tree_deref_slot(slot); 2014 if (unlikely(!page)) 2015 goto next; 2016 if (radix_tree_exception(page)) { 2017 if (radix_tree_deref_retry(page)) 2018 break; 2019 else 2020 goto next; 2021 } 2022 2023 if (!page_cache_get_speculative(page)) 2024 goto repeat; 2025 2026 /* Has the page moved? */ 2027 if (unlikely(page != *slot)) { 2028 page_cache_release(page); 2029 goto repeat; 2030 } 2031 2032 if (!PageUptodate(page) || 2033 PageReadahead(page) || 2034 PageHWPoison(page)) 2035 goto skip; 2036 if (!trylock_page(page)) 2037 goto skip; 2038 2039 if (page->mapping != mapping || !PageUptodate(page)) 2040 goto unlock; 2041 2042 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); 2043 if (page->index >= size >> PAGE_CACHE_SHIFT) 2044 goto unlock; 2045 2046 pte = vmf->pte + page->index - vmf->pgoff; 2047 if (!pte_none(*pte)) 2048 goto unlock; 2049 2050 if (file->f_ra.mmap_miss > 0) 2051 file->f_ra.mmap_miss--; 2052 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; 2053 do_set_pte(vma, addr, page, pte, false, false); 2054 unlock_page(page); 2055 goto next; 2056 unlock: 2057 unlock_page(page); 2058 skip: 2059 page_cache_release(page); 2060 next: 2061 if (iter.index == vmf->max_pgoff) 2062 break; 2063 } 2064 rcu_read_unlock(); 2065 } 2066 EXPORT_SYMBOL(filemap_map_pages); 2067 2068 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 2069 { 2070 struct page *page = vmf->page; 2071 struct inode *inode = file_inode(vma->vm_file); 2072 int ret = VM_FAULT_LOCKED; 2073 2074 sb_start_pagefault(inode->i_sb); 2075 file_update_time(vma->vm_file); 2076 lock_page(page); 2077 if (page->mapping != inode->i_mapping) { 2078 unlock_page(page); 2079 ret = VM_FAULT_NOPAGE; 2080 goto out; 2081 } 2082 /* 2083 * We mark the page dirty already here so that when freeze is in 2084 * progress, we are guaranteed that writeback during freezing will 2085 * see the dirty page and writeprotect it again. 2086 */ 2087 set_page_dirty(page); 2088 wait_for_stable_page(page); 2089 out: 2090 sb_end_pagefault(inode->i_sb); 2091 return ret; 2092 } 2093 EXPORT_SYMBOL(filemap_page_mkwrite); 2094 2095 const struct vm_operations_struct generic_file_vm_ops = { 2096 .fault = filemap_fault, 2097 .map_pages = filemap_map_pages, 2098 .page_mkwrite = filemap_page_mkwrite, 2099 }; 2100 2101 /* This is used for a general mmap of a disk file */ 2102 2103 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2104 { 2105 struct address_space *mapping = file->f_mapping; 2106 2107 if (!mapping->a_ops->readpage) 2108 return -ENOEXEC; 2109 file_accessed(file); 2110 vma->vm_ops = &generic_file_vm_ops; 2111 return 0; 2112 } 2113 2114 /* 2115 * This is for filesystems which do not implement ->writepage. 
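 * Shared writable mappings are refused below (-EINVAL) because without
 * ->writepage there is no way to ever write the dirtied pages back;
 * read-only or private mappings are still fine.
 *
 * For comparison, a filesystem that does support writeback typically just
 * wires up the generic handlers, e.g. (hypothetical sketch only, "foofs" is
 * not a real filesystem):
 *
 *	const struct file_operations foofs_file_operations = {
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};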
2116 */ 2117 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 2118 { 2119 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2120 return -EINVAL; 2121 return generic_file_mmap(file, vma); 2122 } 2123 #else 2124 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2125 { 2126 return -ENOSYS; 2127 } 2128 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 2129 { 2130 return -ENOSYS; 2131 } 2132 #endif /* CONFIG_MMU */ 2133 2134 EXPORT_SYMBOL(generic_file_mmap); 2135 EXPORT_SYMBOL(generic_file_readonly_mmap); 2136 2137 static struct page *wait_on_page_read(struct page *page) 2138 { 2139 if (!IS_ERR(page)) { 2140 wait_on_page_locked(page); 2141 if (!PageUptodate(page)) { 2142 page_cache_release(page); 2143 page = ERR_PTR(-EIO); 2144 } 2145 } 2146 return page; 2147 } 2148 2149 static struct page *__read_cache_page(struct address_space *mapping, 2150 pgoff_t index, 2151 int (*filler)(void *, struct page *), 2152 void *data, 2153 gfp_t gfp) 2154 { 2155 struct page *page; 2156 int err; 2157 repeat: 2158 page = find_get_page(mapping, index); 2159 if (!page) { 2160 page = __page_cache_alloc(gfp | __GFP_COLD); 2161 if (!page) 2162 return ERR_PTR(-ENOMEM); 2163 err = add_to_page_cache_lru(page, mapping, index, gfp); 2164 if (unlikely(err)) { 2165 page_cache_release(page); 2166 if (err == -EEXIST) 2167 goto repeat; 2168 /* Presumably ENOMEM for radix tree node */ 2169 return ERR_PTR(err); 2170 } 2171 err = filler(data, page); 2172 if (err < 0) { 2173 page_cache_release(page); 2174 page = ERR_PTR(err); 2175 } else { 2176 page = wait_on_page_read(page); 2177 } 2178 } 2179 return page; 2180 } 2181 2182 static struct page *do_read_cache_page(struct address_space *mapping, 2183 pgoff_t index, 2184 int (*filler)(void *, struct page *), 2185 void *data, 2186 gfp_t gfp) 2187 2188 { 2189 struct page *page; 2190 int err; 2191 2192 retry: 2193 page = __read_cache_page(mapping, index, filler, data, gfp); 2194 if (IS_ERR(page)) 2195 return page; 2196 if (PageUptodate(page)) 2197 goto out; 2198 2199 lock_page(page); 2200 if (!page->mapping) { 2201 unlock_page(page); 2202 page_cache_release(page); 2203 goto retry; 2204 } 2205 if (PageUptodate(page)) { 2206 unlock_page(page); 2207 goto out; 2208 } 2209 err = filler(data, page); 2210 if (err < 0) { 2211 page_cache_release(page); 2212 return ERR_PTR(err); 2213 } else { 2214 page = wait_on_page_read(page); 2215 if (IS_ERR(page)) 2216 return page; 2217 } 2218 out: 2219 mark_page_accessed(page); 2220 return page; 2221 } 2222 2223 /** 2224 * read_cache_page - read into page cache, fill it if needed 2225 * @mapping: the page's address_space 2226 * @index: the page index 2227 * @filler: function to perform the read 2228 * @data: first arg to filler(data, page) function, often left as NULL 2229 * 2230 * Read into the page cache. If a page already exists, and PageUptodate() is 2231 * not set, try to fill the page and wait for it to become unlocked. 2232 * 2233 * If the page does not get brought uptodate, return -EIO. 2234 */ 2235 struct page *read_cache_page(struct address_space *mapping, 2236 pgoff_t index, 2237 int (*filler)(void *, struct page *), 2238 void *data) 2239 { 2240 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); 2241 } 2242 EXPORT_SYMBOL(read_cache_page); 2243 2244 /** 2245 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 
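 *
 * A minimal, hypothetical caller of these read_cache_page*() helpers,
 * assuming the mapping's ->readpage can bring the page uptodate:
 *
 *	struct page *page = read_cache_page_gfp(mapping, index, GFP_KERNEL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the uptodate page contents ...
 *	page_cache_release(page);
 *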
* @mapping: the page's address_space 2247 * @index: the page index 2248 * @gfp: the page allocator flags to use if allocating 2249 * 2250 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 2251 * any new page allocations done using the specified allocation flags. 2252 * 2253 * If the page does not get brought uptodate, return -EIO. 2254 */ 2255 struct page *read_cache_page_gfp(struct address_space *mapping, 2256 pgoff_t index, 2257 gfp_t gfp) 2258 { 2259 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 2260 2261 return do_read_cache_page(mapping, index, filler, NULL, gfp); 2262 } 2263 EXPORT_SYMBOL(read_cache_page_gfp); 2264 2265 /* 2266 * Performs necessary checks before doing a write 2267 * 2268 * Can adjust the writing position or the number of bytes to write. 2269 * Returns a negative error code that the caller should return, zero when 2270 * there is nothing to write, or the number of bytes that may be written. 2271 */ 2272 inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) 2273 { 2274 struct file *file = iocb->ki_filp; 2275 struct inode *inode = file->f_mapping->host; 2276 unsigned long limit = rlimit(RLIMIT_FSIZE); 2277 loff_t pos; 2278 2279 if (!iov_iter_count(from)) 2280 return 0; 2281 2282 /* FIXME: this is for backwards compatibility with 2.4 */ 2283 if (iocb->ki_flags & IOCB_APPEND) 2284 iocb->ki_pos = i_size_read(inode); 2285 2286 pos = iocb->ki_pos; 2287 2288 if (limit != RLIM_INFINITY) { 2289 if (iocb->ki_pos >= limit) { 2290 send_sig(SIGXFSZ, current, 0); 2291 return -EFBIG; 2292 } 2293 iov_iter_truncate(from, limit - (unsigned long)pos); 2294 } 2295 2296 /* 2297 * LFS rule 2298 */ 2299 if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS && 2300 !(file->f_flags & O_LARGEFILE))) { 2301 if (pos >= MAX_NON_LFS) 2302 return -EFBIG; 2303 iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos); 2304 } 2305 2306 /* 2307 * Are we about to exceed the filesystem's s_maxbytes limit? 2308 * 2309 * If only part of the request fits below the limit, it becomes a short 2310 * write; if none of it fits, we return -EFBIG (the RLIMIT_FSIZE check 2311 * above additionally sends SIGXFSZ). Linus' frestrict idea will clean these up nicely.
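 *
 * Illustrative numbers: with s_maxbytes == 1000, pos == 990 and a 100-byte
 * iov_iter, the request is truncated below to the 10 writable bytes and the
 * caller sees a short write; with pos already at or past 1000 we return
 * -EFBIG instead. The RLIMIT_FSIZE and O_LARGEFILE checks above behave the
 * same way, except that exceeding RLIMIT_FSIZE also raises SIGXFSZ.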
2312 */ 2313 if (unlikely(pos >= inode->i_sb->s_maxbytes)) 2314 return -EFBIG; 2315 2316 iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos); 2317 return iov_iter_count(from); 2318 } 2319 EXPORT_SYMBOL(generic_write_checks); 2320 2321 int pagecache_write_begin(struct file *file, struct address_space *mapping, 2322 loff_t pos, unsigned len, unsigned flags, 2323 struct page **pagep, void **fsdata) 2324 { 2325 const struct address_space_operations *aops = mapping->a_ops; 2326 2327 return aops->write_begin(file, mapping, pos, len, flags, 2328 pagep, fsdata); 2329 } 2330 EXPORT_SYMBOL(pagecache_write_begin); 2331 2332 int pagecache_write_end(struct file *file, struct address_space *mapping, 2333 loff_t pos, unsigned len, unsigned copied, 2334 struct page *page, void *fsdata) 2335 { 2336 const struct address_space_operations *aops = mapping->a_ops; 2337 2338 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 2339 } 2340 EXPORT_SYMBOL(pagecache_write_end); 2341 2342 ssize_t 2343 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) 2344 { 2345 struct file *file = iocb->ki_filp; 2346 struct address_space *mapping = file->f_mapping; 2347 struct inode *inode = mapping->host; 2348 ssize_t written; 2349 size_t write_len; 2350 pgoff_t end; 2351 struct iov_iter data; 2352 2353 write_len = iov_iter_count(from); 2354 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; 2355 2356 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); 2357 if (written) 2358 goto out; 2359 2360 /* 2361 * After a write we want buffered reads to be sure to go to disk to get 2362 * the new data. We invalidate clean cached page from the region we're 2363 * about to write. We do this *before* the write so that we can return 2364 * without clobbering -EIOCBQUEUED from ->direct_IO(). 2365 */ 2366 if (mapping->nrpages) { 2367 written = invalidate_inode_pages2_range(mapping, 2368 pos >> PAGE_CACHE_SHIFT, end); 2369 /* 2370 * If a page can not be invalidated, return 0 to fall back 2371 * to buffered write. 2372 */ 2373 if (written) { 2374 if (written == -EBUSY) 2375 return 0; 2376 goto out; 2377 } 2378 } 2379 2380 data = *from; 2381 written = mapping->a_ops->direct_IO(iocb, &data, pos); 2382 2383 /* 2384 * Finally, try again to invalidate clean pages which might have been 2385 * cached by non-direct readahead, or faulted in by get_user_pages() 2386 * if the source of the write was an mmap'ed region of the file 2387 * we're writing. Either one is a pretty crazy thing to do, 2388 * so we don't support it 100%. If this invalidation 2389 * fails, tough, the write still worked... 2390 */ 2391 if (mapping->nrpages) { 2392 invalidate_inode_pages2_range(mapping, 2393 pos >> PAGE_CACHE_SHIFT, end); 2394 } 2395 2396 if (written > 0) { 2397 pos += written; 2398 iov_iter_advance(from, written); 2399 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 2400 i_size_write(inode, pos); 2401 mark_inode_dirty(inode); 2402 } 2403 iocb->ki_pos = pos; 2404 } 2405 out: 2406 return written; 2407 } 2408 EXPORT_SYMBOL(generic_file_direct_write); 2409 2410 /* 2411 * Find or create a page at the given pagecache position. Return the locked 2412 * page. This function is specifically for buffered writes. 
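 *
 * It is a thin wrapper around pagecache_get_page() with
 * FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT (plus FGP_NOFS when the caller
 * passed AOP_FLAG_NOFS), and it waits for a stable page so that a buffered
 * write does not land on a page still under writeback when the backing
 * device requires stable pages.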
2413 */ 2414 struct page *grab_cache_page_write_begin(struct address_space *mapping, 2415 pgoff_t index, unsigned flags) 2416 { 2417 struct page *page; 2418 int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT; 2419 2420 if (flags & AOP_FLAG_NOFS) 2421 fgp_flags |= FGP_NOFS; 2422 2423 page = pagecache_get_page(mapping, index, fgp_flags, 2424 mapping_gfp_mask(mapping)); 2425 if (page) 2426 wait_for_stable_page(page); 2427 2428 return page; 2429 } 2430 EXPORT_SYMBOL(grab_cache_page_write_begin); 2431 2432 ssize_t generic_perform_write(struct file *file, 2433 struct iov_iter *i, loff_t pos) 2434 { 2435 struct address_space *mapping = file->f_mapping; 2436 const struct address_space_operations *a_ops = mapping->a_ops; 2437 long status = 0; 2438 ssize_t written = 0; 2439 unsigned int flags = 0; 2440 2441 /* 2442 * Copies from kernel address space cannot fail (NFSD is a big user). 2443 */ 2444 if (!iter_is_iovec(i)) 2445 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2446 2447 do { 2448 struct page *page; 2449 unsigned long offset; /* Offset into pagecache page */ 2450 unsigned long bytes; /* Bytes to write to page */ 2451 size_t copied; /* Bytes copied from user */ 2452 void *fsdata; 2453 2454 offset = (pos & (PAGE_CACHE_SIZE - 1)); 2455 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2456 iov_iter_count(i)); 2457 2458 again: 2459 /* 2460 * Bring in the user page that we will copy from _first_. 2461 * Otherwise there's a nasty deadlock on copying from the 2462 * same page as we're writing to, without it being marked 2463 * up-to-date. 2464 * 2465 * Not only is this an optimisation, but it is also required 2466 * to check that the address is actually valid, when atomic 2467 * usercopies are used, below. 2468 */ 2469 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2470 status = -EFAULT; 2471 break; 2472 } 2473 2474 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2475 &page, &fsdata); 2476 if (unlikely(status < 0)) 2477 break; 2478 2479 if (mapping_writably_mapped(mapping)) 2480 flush_dcache_page(page); 2481 2482 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2483 flush_dcache_page(page); 2484 2485 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2486 page, fsdata); 2487 if (unlikely(status < 0)) 2488 break; 2489 copied = status; 2490 2491 cond_resched(); 2492 2493 iov_iter_advance(i, copied); 2494 if (unlikely(copied == 0)) { 2495 /* 2496 * If we were unable to copy any data at all, we must 2497 * fall back to a single segment length write. 2498 * 2499 * If we didn't fallback here, we could livelock 2500 * because not all segments in the iov can be copied at 2501 * once without a pagefault. 2502 */ 2503 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2504 iov_iter_single_seg_count(i)); 2505 goto again; 2506 } 2507 pos += copied; 2508 written += copied; 2509 2510 balance_dirty_pages_ratelimited(mapping); 2511 if (fatal_signal_pending(current)) { 2512 status = -EINTR; 2513 break; 2514 } 2515 } while (iov_iter_count(i)); 2516 2517 return written ? written : status; 2518 } 2519 EXPORT_SYMBOL(generic_perform_write); 2520 2521 /** 2522 * __generic_file_write_iter - write data to a file 2523 * @iocb: IO state structure (file, offset, etc.) 2524 * @from: iov_iter with data to write 2525 * 2526 * This function does all the work needed for actually writing data to a 2527 * file. 
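 * (Buffered writes end up in generic_perform_write() above; direct I/O goes
 * through generic_file_direct_write() and may fall back to the buffered path
 * for any unwritten tail.)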
It does all basic checks, removes SUID from the file, updates 2528 * modification times and calls proper subroutines depending on whether we 2529 * do direct IO or a standard buffered write. 2530 * 2531 * It expects i_mutex to be grabbed unless we work on a block device or similar 2532 * object which does not need locking at all. 2533 * 2534 * This function does *not* take care of syncing data in case of O_SYNC write. 2535 * A caller has to handle it. This is mainly due to the fact that we want to 2536 * avoid syncing under i_mutex. 2537 */ 2538 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2539 { 2540 struct file *file = iocb->ki_filp; 2541 struct address_space * mapping = file->f_mapping; 2542 struct inode *inode = mapping->host; 2543 ssize_t written = 0; 2544 ssize_t err; 2545 ssize_t status; 2546 2547 /* We can write back this queue in page reclaim */ 2548 current->backing_dev_info = inode_to_bdi(inode); 2549 err = file_remove_suid(file); 2550 if (err) 2551 goto out; 2552 2553 err = file_update_time(file); 2554 if (err) 2555 goto out; 2556 2557 if (iocb->ki_flags & IOCB_DIRECT) { 2558 loff_t pos, endbyte; 2559 2560 written = generic_file_direct_write(iocb, from, iocb->ki_pos); 2561 /* 2562 * If the write stopped short of completing, fall back to 2563 * buffered writes. Some filesystems do this for writes to 2564 * holes, for example. For DAX files, a buffered write will 2565 * not succeed (even if it did, DAX does not handle dirty 2566 * page-cache pages correctly). 2567 */ 2568 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) 2569 goto out; 2570 2571 status = generic_perform_write(file, from, pos = iocb->ki_pos); 2572 /* 2573 * If generic_perform_write() returned a synchronous error 2574 * then we want to return the number of bytes which were 2575 * direct-written, or the error code if that was zero. Note 2576 * that this differs from normal direct-io semantics, which 2577 * will return -EFOO even if some bytes were written. 2578 */ 2579 if (unlikely(status < 0)) { 2580 err = status; 2581 goto out; 2582 } 2583 /* 2584 * We need to ensure that the page cache pages are written to 2585 * disk and invalidated to preserve the expected O_DIRECT 2586 * semantics. 2587 */ 2588 endbyte = pos + status - 1; 2589 err = filemap_write_and_wait_range(mapping, pos, endbyte); 2590 if (err == 0) { 2591 iocb->ki_pos = endbyte + 1; 2592 written += status; 2593 invalidate_mapping_pages(mapping, 2594 pos >> PAGE_CACHE_SHIFT, 2595 endbyte >> PAGE_CACHE_SHIFT); 2596 } else { 2597 /* 2598 * We don't know how much we wrote, so just return 2599 * the number of bytes which were direct-written 2600 */ 2601 } 2602 } else { 2603 written = generic_perform_write(file, from, iocb->ki_pos); 2604 if (likely(written > 0)) 2605 iocb->ki_pos += written; 2606 } 2607 out: 2608 current->backing_dev_info = NULL; 2609 return written ? written : err; 2610 } 2611 EXPORT_SYMBOL(__generic_file_write_iter); 2612 2613 /** 2614 * generic_file_write_iter - write data to a file 2615 * @iocb: IO state structure 2616 * @from: iov_iter with data to write 2617 * 2618 * This is a wrapper around __generic_file_write_iter() to be used by most 2619 * filesystems. It takes care of syncing the file in case of O_SYNC file 2620 * and acquires i_mutex as needed. 
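 *
 * Note the ordering in the body below: generic_write_checks() and
 * __generic_file_write_iter() run under i_mutex, while generic_write_sync()
 * for the just-written range runs only after i_mutex has been dropped,
 * matching the "avoid syncing under i_mutex" rule described above.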
*/ 2622 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2623 { 2624 struct file *file = iocb->ki_filp; 2625 struct inode *inode = file->f_mapping->host; 2626 ssize_t ret; 2627 2628 mutex_lock(&inode->i_mutex); 2629 ret = generic_write_checks(iocb, from); 2630 if (ret > 0) 2631 ret = __generic_file_write_iter(iocb, from); 2632 mutex_unlock(&inode->i_mutex); 2633 2634 if (ret > 0) { 2635 ssize_t err; 2636 2637 err = generic_write_sync(file, iocb->ki_pos - ret, ret); 2638 if (err < 0) 2639 ret = err; 2640 } 2641 return ret; 2642 } 2643 EXPORT_SYMBOL(generic_file_write_iter); 2644 2645 /** 2646 * try_to_release_page() - release old fs-specific metadata on a page 2647 * 2648 * @page: the page which the kernel is trying to free 2649 * @gfp_mask: memory allocation flags (and I/O mode) 2650 * 2651 * The address_space is asked to try to release any data held against the page 2652 * (presumably at page->private). If the release was successful, return 1. 2653 * Otherwise return zero. 2654 * 2655 * This may also be called if PG_fscache is set on a page, indicating that the 2656 * page is known to the local caching routines. 2657 * 2658 * The @gfp_mask argument specifies whether I/O may be performed to release 2659 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS). 2660 * 2661 */ 2662 int try_to_release_page(struct page *page, gfp_t gfp_mask) 2663 { 2664 struct address_space * const mapping = page->mapping; 2665 2666 BUG_ON(!PageLocked(page)); 2667 if (PageWriteback(page)) 2668 return 0; 2669 2670 if (mapping && mapping->a_ops->releasepage) 2671 return mapping->a_ops->releasepage(page, gfp_mask); 2672 return try_to_free_buffers(page); 2673 } 2674 2675 EXPORT_SYMBOL(try_to_release_page); 2676
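/*
 * Illustrative sketch: a filesystem whose pages carry no private metadata
 * beyond buffer_heads can leave ->releasepage unset and rely on the
 * try_to_free_buffers() fallback above, or provide a trivial wrapper such as
 * this hypothetical one:
 *
 *	static int foofs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */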