1 /* 2 * linux/mm/filemap.c 3 * 4 * Copyright (C) 1994-1999 Linus Torvalds 5 */ 6 7 /* 8 * This file handles the generic file mmap semantics used by 9 * most "normal" filesystems (but you don't /have/ to use this: 10 * the NFS filesystem used to do this differently, for example) 11 */ 12 #include <linux/export.h> 13 #include <linux/compiler.h> 14 #include <linux/dax.h> 15 #include <linux/fs.h> 16 #include <linux/uaccess.h> 17 #include <linux/capability.h> 18 #include <linux/kernel_stat.h> 19 #include <linux/gfp.h> 20 #include <linux/mm.h> 21 #include <linux/swap.h> 22 #include <linux/mman.h> 23 #include <linux/pagemap.h> 24 #include <linux/file.h> 25 #include <linux/uio.h> 26 #include <linux/hash.h> 27 #include <linux/writeback.h> 28 #include <linux/backing-dev.h> 29 #include <linux/pagevec.h> 30 #include <linux/blkdev.h> 31 #include <linux/security.h> 32 #include <linux/cpuset.h> 33 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ 34 #include <linux/hugetlb.h> 35 #include <linux/memcontrol.h> 36 #include <linux/cleancache.h> 37 #include <linux/rmap.h> 38 #include "internal.h" 39 40 #define CREATE_TRACE_POINTS 41 #include <trace/events/filemap.h> 42 43 /* 44 * FIXME: remove all knowledge of the buffer layer from the core VM 45 */ 46 #include <linux/buffer_head.h> /* for try_to_free_buffers */ 47 48 #include <asm/mman.h> 49 50 /* 51 * Shared mappings implemented 30.11.1994. It's not fully working yet, 52 * though. 53 * 54 * Shared mappings now work. 15.8.1995 Bruno. 55 * 56 * finished 'unifying' the page and buffer cache and SMP-threaded the 57 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> 58 * 59 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> 60 */ 61 62 /* 63 * Lock ordering: 64 * 65 * ->i_mmap_rwsem (truncate_pagecache) 66 * ->private_lock (__free_pte->__set_page_dirty_buffers) 67 * ->swap_lock (exclusive_swap_page, others) 68 * ->mapping->tree_lock 69 * 70 * ->i_mutex 71 * ->i_mmap_rwsem (truncate->unmap_mapping_range) 72 * 73 * ->mmap_sem 74 * ->i_mmap_rwsem 75 * ->page_table_lock or pte_lock (various, mainly in memory.c) 76 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) 77 * 78 * ->mmap_sem 79 * ->lock_page (access_process_vm) 80 * 81 * ->i_mutex (generic_perform_write) 82 * ->mmap_sem (fault_in_pages_readable->do_page_fault) 83 * 84 * bdi->wb.list_lock 85 * sb_lock (fs/fs-writeback.c) 86 * ->mapping->tree_lock (__sync_single_inode) 87 * 88 * ->i_mmap_rwsem 89 * ->anon_vma.lock (vma_adjust) 90 * 91 * ->anon_vma.lock 92 * ->page_table_lock or pte_lock (anon_vma_prepare and various) 93 * 94 * ->page_table_lock or pte_lock 95 * ->swap_lock (try_to_unmap_one) 96 * ->private_lock (try_to_unmap_one) 97 * ->tree_lock (try_to_unmap_one) 98 * ->zone_lru_lock(zone) (follow_page->mark_page_accessed) 99 * ->zone_lru_lock(zone) (check_pte_range->isolate_lru_page) 100 * ->private_lock (page_remove_rmap->set_page_dirty) 101 * ->tree_lock (page_remove_rmap->set_page_dirty) 102 * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) 103 * ->inode->i_lock (page_remove_rmap->set_page_dirty) 104 * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) 105 * bdi.wb->list_lock (zap_pte_range->set_page_dirty) 106 * ->inode->i_lock (zap_pte_range->set_page_dirty) 107 * ->private_lock (zap_pte_range->__set_page_dirty_buffers) 108 * 109 * ->i_mmap_rwsem 110 * ->tasklist_lock (memory_failure, collect_procs_ao) 111 */ 112 113 static int page_cache_tree_insert(struct address_space *mapping, 114 struct page *page, void **shadowp) 115 { 116 struct radix_tree_node *node; 117 void **slot; 118 int error; 119 120 error = __radix_tree_create(&mapping->page_tree, page->index, 0, 121 &node, &slot); 122 if (error) 123 return error; 124 if (*slot) { 125 void *p; 126 127 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); 128 if (!radix_tree_exceptional_entry(p)) 129 return -EEXIST; 130 131 mapping->nrexceptional--; 132 if (!dax_mapping(mapping)) { 133 if (shadowp) 134 *shadowp = p; 135 } else { 136 /* DAX can replace empty locked entry with a hole */ 137 WARN_ON_ONCE(p != 138 dax_radix_locked_entry(0, RADIX_DAX_EMPTY)); 139 /* Wakeup waiters for exceptional entry lock */ 140 dax_wake_mapping_entry_waiter(mapping, page->index, p, 141 false); 142 } 143 } 144 __radix_tree_replace(&mapping->page_tree, node, slot, page, 145 workingset_update_node, mapping); 146 mapping->nrpages++; 147 return 0; 148 } 149 150 static void page_cache_tree_delete(struct address_space *mapping, 151 struct page *page, void *shadow) 152 { 153 int i, nr; 154 155 /* hugetlb pages are represented by one entry in the radix tree */ 156 nr = PageHuge(page) ? 1 : hpage_nr_pages(page); 157 158 VM_BUG_ON_PAGE(!PageLocked(page), page); 159 VM_BUG_ON_PAGE(PageTail(page), page); 160 VM_BUG_ON_PAGE(nr != 1 && shadow, page); 161 162 for (i = 0; i < nr; i++) { 163 struct radix_tree_node *node; 164 void **slot; 165 166 __radix_tree_lookup(&mapping->page_tree, page->index + i, 167 &node, &slot); 168 169 VM_BUG_ON_PAGE(!node && nr != 1, page); 170 171 radix_tree_clear_tags(&mapping->page_tree, node, slot); 172 __radix_tree_replace(&mapping->page_tree, node, slot, shadow, 173 workingset_update_node, mapping); 174 } 175 176 if (shadow) { 177 mapping->nrexceptional += nr; 178 /* 179 * Make sure the nrexceptional update is committed before 180 * the nrpages update so that final truncate racing 181 * with reclaim does not see both counters 0 at the 182 * same time and miss a shadow entry. 183 */ 184 smp_wmb(); 185 } 186 mapping->nrpages -= nr; 187 } 188 189 /* 190 * Delete a page from the page cache and free it. Caller has to make 191 * sure the page is locked and that nobody else uses it - or that usage 192 * is safe. The caller must hold the mapping's tree_lock. 193 */ 194 void __delete_from_page_cache(struct page *page, void *shadow) 195 { 196 struct address_space *mapping = page->mapping; 197 int nr = hpage_nr_pages(page); 198 199 trace_mm_filemap_delete_from_page_cache(page); 200 /* 201 * if we're uptodate, flush out into the cleancache, otherwise 202 * invalidate any existing cleancache entries. We can't leave 203 * stale data around in the cleancache once our page is gone 204 */ 205 if (PageUptodate(page) && PageMappedToDisk(page)) 206 cleancache_put_page(page); 207 else 208 cleancache_invalidate_page(mapping, page); 209 210 VM_BUG_ON_PAGE(PageTail(page), page); 211 VM_BUG_ON_PAGE(page_mapped(page), page); 212 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { 213 int mapcount; 214 215 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", 216 current->comm, page_to_pfn(page)); 217 dump_page(page, "still mapped when deleted"); 218 dump_stack(); 219 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 220 221 mapcount = page_mapcount(page); 222 if (mapping_exiting(mapping) && 223 page_count(page) >= mapcount + 2) { 224 /* 225 * All vmas have already been torn down, so it's 226 * a good bet that actually the page is unmapped, 227 * and we'd prefer not to leak it: if we're wrong, 228 * some other bad page check should catch it later. 229 */ 230 page_mapcount_reset(page); 231 page_ref_sub(page, mapcount); 232 } 233 } 234 235 page_cache_tree_delete(mapping, page, shadow); 236 237 page->mapping = NULL; 238 /* Leave page->index set: truncation lookup relies upon it */ 239 240 /* hugetlb pages do not participate in page cache accounting. */ 241 if (!PageHuge(page)) 242 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); 243 if (PageSwapBacked(page)) { 244 __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); 245 if (PageTransHuge(page)) 246 __dec_node_page_state(page, NR_SHMEM_THPS); 247 } else { 248 VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page); 249 } 250 251 /* 252 * At this point page must be either written or cleaned by truncate. 253 * Dirty page here signals a bug and loss of unwritten data. 254 * 255 * This fixes dirty accounting after removing the page entirely but 256 * leaves PageDirty set: it has no effect for truncated page and 257 * anyway will be cleared before returning page into buddy allocator. 258 */ 259 if (WARN_ON_ONCE(PageDirty(page))) 260 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); 261 } 262 263 /** 264 * delete_from_page_cache - delete page from page cache 265 * @page: the page which the kernel is trying to remove from page cache 266 * 267 * This must be called only on pages that have been verified to be in the page 268 * cache and locked. It will never put the page into the free list, the caller 269 * has a reference on the page. 270 */ 271 void delete_from_page_cache(struct page *page) 272 { 273 struct address_space *mapping = page_mapping(page); 274 unsigned long flags; 275 void (*freepage)(struct page *); 276 277 BUG_ON(!PageLocked(page)); 278 279 freepage = mapping->a_ops->freepage; 280 281 spin_lock_irqsave(&mapping->tree_lock, flags); 282 __delete_from_page_cache(page, NULL); 283 spin_unlock_irqrestore(&mapping->tree_lock, flags); 284 285 if (freepage) 286 freepage(page); 287 288 if (PageTransHuge(page) && !PageHuge(page)) { 289 page_ref_sub(page, HPAGE_PMD_NR); 290 VM_BUG_ON_PAGE(page_count(page) <= 0, page); 291 } else { 292 put_page(page); 293 } 294 } 295 EXPORT_SYMBOL(delete_from_page_cache); 296 297 int filemap_check_errors(struct address_space *mapping) 298 { 299 int ret = 0; 300 /* Check for outstanding write errors */ 301 if (test_bit(AS_ENOSPC, &mapping->flags) && 302 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) 303 ret = -ENOSPC; 304 if (test_bit(AS_EIO, &mapping->flags) && 305 test_and_clear_bit(AS_EIO, &mapping->flags)) 306 ret = -EIO; 307 return ret; 308 } 309 EXPORT_SYMBOL(filemap_check_errors); 310 311 /** 312 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range 313 * @mapping: address space structure to write 314 * @start: offset in bytes where the range starts 315 * @end: offset in bytes where the range ends (inclusive) 316 * @sync_mode: enable synchronous operation 317 * 318 * Start writeback against all of a mapping's dirty pages that lie 319 * within the byte offsets <start, end> inclusive. 320 * 321 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as 322 * opposed to a regular memory cleansing writeback. The difference between 323 * these two operations is that if a dirty page/buffer is encountered, it must 324 * be waited upon, and not just skipped over. 325 */ 326 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 327 loff_t end, int sync_mode) 328 { 329 int ret; 330 struct writeback_control wbc = { 331 .sync_mode = sync_mode, 332 .nr_to_write = LONG_MAX, 333 .range_start = start, 334 .range_end = end, 335 }; 336 337 if (!mapping_cap_writeback_dirty(mapping)) 338 return 0; 339 340 wbc_attach_fdatawrite_inode(&wbc, mapping->host); 341 ret = do_writepages(mapping, &wbc); 342 wbc_detach_inode(&wbc); 343 return ret; 344 } 345 346 static inline int __filemap_fdatawrite(struct address_space *mapping, 347 int sync_mode) 348 { 349 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); 350 } 351 352 int filemap_fdatawrite(struct address_space *mapping) 353 { 354 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); 355 } 356 EXPORT_SYMBOL(filemap_fdatawrite); 357 358 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, 359 loff_t end) 360 { 361 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); 362 } 363 EXPORT_SYMBOL(filemap_fdatawrite_range); 364 365 /** 366 * filemap_flush - mostly a non-blocking flush 367 * @mapping: target address_space 368 * 369 * This is a mostly non-blocking flush. Not suitable for data-integrity 370 * purposes - I/O may not be started against all dirty pages. 371 */ 372 int filemap_flush(struct address_space *mapping) 373 { 374 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); 375 } 376 EXPORT_SYMBOL(filemap_flush); 377 378 static int __filemap_fdatawait_range(struct address_space *mapping, 379 loff_t start_byte, loff_t end_byte) 380 { 381 pgoff_t index = start_byte >> PAGE_SHIFT; 382 pgoff_t end = end_byte >> PAGE_SHIFT; 383 struct pagevec pvec; 384 int nr_pages; 385 int ret = 0; 386 387 if (end_byte < start_byte) 388 goto out; 389 390 pagevec_init(&pvec, 0); 391 while ((index <= end) && 392 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, 393 PAGECACHE_TAG_WRITEBACK, 394 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) { 395 unsigned i; 396 397 for (i = 0; i < nr_pages; i++) { 398 struct page *page = pvec.pages[i]; 399 400 /* until radix tree lookup accepts end_index */ 401 if (page->index > end) 402 continue; 403 404 wait_on_page_writeback(page); 405 if (TestClearPageError(page)) 406 ret = -EIO; 407 } 408 pagevec_release(&pvec); 409 cond_resched(); 410 } 411 out: 412 return ret; 413 } 414 415 /** 416 * filemap_fdatawait_range - wait for writeback to complete 417 * @mapping: address space structure to wait for 418 * @start_byte: offset in bytes where the range starts 419 * @end_byte: offset in bytes where the range ends (inclusive) 420 * 421 * Walk the list of under-writeback pages of the given address space 422 * in the given range and wait for all of them. Check error status of 423 * the address space and return it. 424 * 425 * Since the error status of the address space is cleared by this function, 426 * callers are responsible for checking the return value and handling and/or 427 * reporting the error. 428 */ 429 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, 430 loff_t end_byte) 431 { 432 int ret, ret2; 433 434 ret = __filemap_fdatawait_range(mapping, start_byte, end_byte); 435 ret2 = filemap_check_errors(mapping); 436 if (!ret) 437 ret = ret2; 438 439 return ret; 440 } 441 EXPORT_SYMBOL(filemap_fdatawait_range); 442 443 /** 444 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors 445 * @mapping: address space structure to wait for 446 * 447 * Walk the list of under-writeback pages of the given address space 448 * and wait for all of them. Unlike filemap_fdatawait(), this function 449 * does not clear error status of the address space. 450 * 451 * Use this function if callers don't handle errors themselves. Expected 452 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), 453 * fsfreeze(8) 454 */ 455 void filemap_fdatawait_keep_errors(struct address_space *mapping) 456 { 457 loff_t i_size = i_size_read(mapping->host); 458 459 if (i_size == 0) 460 return; 461 462 __filemap_fdatawait_range(mapping, 0, i_size - 1); 463 } 464 465 /** 466 * filemap_fdatawait - wait for all under-writeback pages to complete 467 * @mapping: address space structure to wait for 468 * 469 * Walk the list of under-writeback pages of the given address space 470 * and wait for all of them. Check error status of the address space 471 * and return it. 472 * 473 * Since the error status of the address space is cleared by this function, 474 * callers are responsible for checking the return value and handling and/or 475 * reporting the error. 476 */ 477 int filemap_fdatawait(struct address_space *mapping) 478 { 479 loff_t i_size = i_size_read(mapping->host); 480 481 if (i_size == 0) 482 return 0; 483 484 return filemap_fdatawait_range(mapping, 0, i_size - 1); 485 } 486 EXPORT_SYMBOL(filemap_fdatawait); 487 488 int filemap_write_and_wait(struct address_space *mapping) 489 { 490 int err = 0; 491 492 if ((!dax_mapping(mapping) && mapping->nrpages) || 493 (dax_mapping(mapping) && mapping->nrexceptional)) { 494 err = filemap_fdatawrite(mapping); 495 /* 496 * Even if the above returned error, the pages may be 497 * written partially (e.g. -ENOSPC), so we wait for it. 498 * But the -EIO is special case, it may indicate the worst 499 * thing (e.g. bug) happened, so we avoid waiting for it. 500 */ 501 if (err != -EIO) { 502 int err2 = filemap_fdatawait(mapping); 503 if (!err) 504 err = err2; 505 } 506 } else { 507 err = filemap_check_errors(mapping); 508 } 509 return err; 510 } 511 EXPORT_SYMBOL(filemap_write_and_wait); 512 513 /** 514 * filemap_write_and_wait_range - write out & wait on a file range 515 * @mapping: the address_space for the pages 516 * @lstart: offset in bytes where the range starts 517 * @lend: offset in bytes where the range ends (inclusive) 518 * 519 * Write out and wait upon file offsets lstart->lend, inclusive. 520 * 521 * Note that `lend' is inclusive (describes the last byte to be written) so 522 * that this function can be used to write to the very end-of-file (end = -1). 523 */ 524 int filemap_write_and_wait_range(struct address_space *mapping, 525 loff_t lstart, loff_t lend) 526 { 527 int err = 0; 528 529 if ((!dax_mapping(mapping) && mapping->nrpages) || 530 (dax_mapping(mapping) && mapping->nrexceptional)) { 531 err = __filemap_fdatawrite_range(mapping, lstart, lend, 532 WB_SYNC_ALL); 533 /* See comment of filemap_write_and_wait() */ 534 if (err != -EIO) { 535 int err2 = filemap_fdatawait_range(mapping, 536 lstart, lend); 537 if (!err) 538 err = err2; 539 } 540 } else { 541 err = filemap_check_errors(mapping); 542 } 543 return err; 544 } 545 EXPORT_SYMBOL(filemap_write_and_wait_range); 546 547 /** 548 * replace_page_cache_page - replace a pagecache page with a new one 549 * @old: page to be replaced 550 * @new: page to replace with 551 * @gfp_mask: allocation mode 552 * 553 * This function replaces a page in the pagecache with a new one. On 554 * success it acquires the pagecache reference for the new page and 555 * drops it for the old page. Both the old and new pages must be 556 * locked. This function does not add the new page to the LRU, the 557 * caller must do that. 558 * 559 * The remove + add is atomic. The only way this function can fail is 560 * memory allocation failure. 561 */ 562 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) 563 { 564 int error; 565 566 VM_BUG_ON_PAGE(!PageLocked(old), old); 567 VM_BUG_ON_PAGE(!PageLocked(new), new); 568 VM_BUG_ON_PAGE(new->mapping, new); 569 570 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); 571 if (!error) { 572 struct address_space *mapping = old->mapping; 573 void (*freepage)(struct page *); 574 unsigned long flags; 575 576 pgoff_t offset = old->index; 577 freepage = mapping->a_ops->freepage; 578 579 get_page(new); 580 new->mapping = mapping; 581 new->index = offset; 582 583 spin_lock_irqsave(&mapping->tree_lock, flags); 584 __delete_from_page_cache(old, NULL); 585 error = page_cache_tree_insert(mapping, new, NULL); 586 BUG_ON(error); 587 588 /* 589 * hugetlb pages do not participate in page cache accounting. 590 */ 591 if (!PageHuge(new)) 592 __inc_node_page_state(new, NR_FILE_PAGES); 593 if (PageSwapBacked(new)) 594 __inc_node_page_state(new, NR_SHMEM); 595 spin_unlock_irqrestore(&mapping->tree_lock, flags); 596 mem_cgroup_migrate(old, new); 597 radix_tree_preload_end(); 598 if (freepage) 599 freepage(old); 600 put_page(old); 601 } 602 603 return error; 604 } 605 EXPORT_SYMBOL_GPL(replace_page_cache_page); 606 607 static int __add_to_page_cache_locked(struct page *page, 608 struct address_space *mapping, 609 pgoff_t offset, gfp_t gfp_mask, 610 void **shadowp) 611 { 612 int huge = PageHuge(page); 613 struct mem_cgroup *memcg; 614 int error; 615 616 VM_BUG_ON_PAGE(!PageLocked(page), page); 617 VM_BUG_ON_PAGE(PageSwapBacked(page), page); 618 619 if (!huge) { 620 error = mem_cgroup_try_charge(page, current->mm, 621 gfp_mask, &memcg, false); 622 if (error) 623 return error; 624 } 625 626 error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); 627 if (error) { 628 if (!huge) 629 mem_cgroup_cancel_charge(page, memcg, false); 630 return error; 631 } 632 633 get_page(page); 634 page->mapping = mapping; 635 page->index = offset; 636 637 spin_lock_irq(&mapping->tree_lock); 638 error = page_cache_tree_insert(mapping, page, shadowp); 639 radix_tree_preload_end(); 640 if (unlikely(error)) 641 goto err_insert; 642 643 /* hugetlb pages do not participate in page cache accounting. */ 644 if (!huge) 645 __inc_node_page_state(page, NR_FILE_PAGES); 646 spin_unlock_irq(&mapping->tree_lock); 647 if (!huge) 648 mem_cgroup_commit_charge(page, memcg, false, false); 649 trace_mm_filemap_add_to_page_cache(page); 650 return 0; 651 err_insert: 652 page->mapping = NULL; 653 /* Leave page->index set: truncation relies upon it */ 654 spin_unlock_irq(&mapping->tree_lock); 655 if (!huge) 656 mem_cgroup_cancel_charge(page, memcg, false); 657 put_page(page); 658 return error; 659 } 660 661 /** 662 * add_to_page_cache_locked - add a locked page to the pagecache 663 * @page: page to add 664 * @mapping: the page's address_space 665 * @offset: page index 666 * @gfp_mask: page allocation mode 667 * 668 * This function is used to add a page to the pagecache. It must be locked. 669 * This function does not add the page to the LRU. The caller must do that. 670 */ 671 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, 672 pgoff_t offset, gfp_t gfp_mask) 673 { 674 return __add_to_page_cache_locked(page, mapping, offset, 675 gfp_mask, NULL); 676 } 677 EXPORT_SYMBOL(add_to_page_cache_locked); 678 679 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 680 pgoff_t offset, gfp_t gfp_mask) 681 { 682 void *shadow = NULL; 683 int ret; 684 685 __SetPageLocked(page); 686 ret = __add_to_page_cache_locked(page, mapping, offset, 687 gfp_mask, &shadow); 688 if (unlikely(ret)) 689 __ClearPageLocked(page); 690 else { 691 /* 692 * The page might have been evicted from cache only 693 * recently, in which case it should be activated like 694 * any other repeatedly accessed page. 695 * The exception is pages getting rewritten; evicting other 696 * data from the working set, only to cache data that will 697 * get overwritten with something else, is a waste of memory. 698 */ 699 if (!(gfp_mask & __GFP_WRITE) && 700 shadow && workingset_refault(shadow)) { 701 SetPageActive(page); 702 workingset_activation(page); 703 } else 704 ClearPageActive(page); 705 lru_cache_add(page); 706 } 707 return ret; 708 } 709 EXPORT_SYMBOL_GPL(add_to_page_cache_lru); 710 711 #ifdef CONFIG_NUMA 712 struct page *__page_cache_alloc(gfp_t gfp) 713 { 714 int n; 715 struct page *page; 716 717 if (cpuset_do_page_mem_spread()) { 718 unsigned int cpuset_mems_cookie; 719 do { 720 cpuset_mems_cookie = read_mems_allowed_begin(); 721 n = cpuset_mem_spread_node(); 722 page = __alloc_pages_node(n, gfp, 0); 723 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); 724 725 return page; 726 } 727 return alloc_pages(gfp, 0); 728 } 729 EXPORT_SYMBOL(__page_cache_alloc); 730 #endif 731 732 /* 733 * In order to wait for pages to become available there must be 734 * waitqueues associated with pages. By using a hash table of 735 * waitqueues where the bucket discipline is to maintain all 736 * waiters on the same queue and wake all when any of the pages 737 * become available, and for the woken contexts to check to be 738 * sure the appropriate page became available, this saves space 739 * at a cost of "thundering herd" phenomena during rare hash 740 * collisions. 741 */ 742 wait_queue_head_t *page_waitqueue(struct page *page) 743 { 744 return bit_waitqueue(page, 0); 745 } 746 EXPORT_SYMBOL(page_waitqueue); 747 748 void wait_on_page_bit(struct page *page, int bit_nr) 749 { 750 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); 751 752 if (test_bit(bit_nr, &page->flags)) 753 __wait_on_bit(page_waitqueue(page), &wait, bit_wait_io, 754 TASK_UNINTERRUPTIBLE); 755 } 756 EXPORT_SYMBOL(wait_on_page_bit); 757 758 int wait_on_page_bit_killable(struct page *page, int bit_nr) 759 { 760 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); 761 762 if (!test_bit(bit_nr, &page->flags)) 763 return 0; 764 765 return __wait_on_bit(page_waitqueue(page), &wait, 766 bit_wait_io, TASK_KILLABLE); 767 } 768 769 int wait_on_page_bit_killable_timeout(struct page *page, 770 int bit_nr, unsigned long timeout) 771 { 772 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); 773 774 wait.key.timeout = jiffies + timeout; 775 if (!test_bit(bit_nr, &page->flags)) 776 return 0; 777 return __wait_on_bit(page_waitqueue(page), &wait, 778 bit_wait_io_timeout, TASK_KILLABLE); 779 } 780 EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout); 781 782 /** 783 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue 784 * @page: Page defining the wait queue of interest 785 * @waiter: Waiter to add to the queue 786 * 787 * Add an arbitrary @waiter to the wait queue for the nominated @page. 788 */ 789 void add_page_wait_queue(struct page *page, wait_queue_t *waiter) 790 { 791 wait_queue_head_t *q = page_waitqueue(page); 792 unsigned long flags; 793 794 spin_lock_irqsave(&q->lock, flags); 795 __add_wait_queue(q, waiter); 796 spin_unlock_irqrestore(&q->lock, flags); 797 } 798 EXPORT_SYMBOL_GPL(add_page_wait_queue); 799 800 /** 801 * unlock_page - unlock a locked page 802 * @page: the page 803 * 804 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). 805 * Also wakes sleepers in wait_on_page_writeback() because the wakeup 806 * mechanism between PageLocked pages and PageWriteback pages is shared. 807 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. 808 * 809 * The mb is necessary to enforce ordering between the clear_bit and the read 810 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()). 811 */ 812 void unlock_page(struct page *page) 813 { 814 page = compound_head(page); 815 VM_BUG_ON_PAGE(!PageLocked(page), page); 816 clear_bit_unlock(PG_locked, &page->flags); 817 smp_mb__after_atomic(); 818 wake_up_page(page, PG_locked); 819 } 820 EXPORT_SYMBOL(unlock_page); 821 822 /** 823 * end_page_writeback - end writeback against a page 824 * @page: the page 825 */ 826 void end_page_writeback(struct page *page) 827 { 828 /* 829 * TestClearPageReclaim could be used here but it is an atomic 830 * operation and overkill in this particular case. Failing to 831 * shuffle a page marked for immediate reclaim is too mild to 832 * justify taking an atomic operation penalty at the end of 833 * ever page writeback. 834 */ 835 if (PageReclaim(page)) { 836 ClearPageReclaim(page); 837 rotate_reclaimable_page(page); 838 } 839 840 if (!test_clear_page_writeback(page)) 841 BUG(); 842 843 smp_mb__after_atomic(); 844 wake_up_page(page, PG_writeback); 845 } 846 EXPORT_SYMBOL(end_page_writeback); 847 848 /* 849 * After completing I/O on a page, call this routine to update the page 850 * flags appropriately 851 */ 852 void page_endio(struct page *page, bool is_write, int err) 853 { 854 if (!is_write) { 855 if (!err) { 856 SetPageUptodate(page); 857 } else { 858 ClearPageUptodate(page); 859 SetPageError(page); 860 } 861 unlock_page(page); 862 } else { 863 if (err) { 864 SetPageError(page); 865 if (page->mapping) 866 mapping_set_error(page->mapping, err); 867 } 868 end_page_writeback(page); 869 } 870 } 871 EXPORT_SYMBOL_GPL(page_endio); 872 873 /** 874 * __lock_page - get a lock on the page, assuming we need to sleep to get it 875 * @page: the page to lock 876 */ 877 void __lock_page(struct page *page) 878 { 879 struct page *page_head = compound_head(page); 880 DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked); 881 882 __wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io, 883 TASK_UNINTERRUPTIBLE); 884 } 885 EXPORT_SYMBOL(__lock_page); 886 887 int __lock_page_killable(struct page *page) 888 { 889 struct page *page_head = compound_head(page); 890 DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked); 891 892 return __wait_on_bit_lock(page_waitqueue(page_head), &wait, 893 bit_wait_io, TASK_KILLABLE); 894 } 895 EXPORT_SYMBOL_GPL(__lock_page_killable); 896 897 /* 898 * Return values: 899 * 1 - page is locked; mmap_sem is still held. 900 * 0 - page is not locked. 901 * mmap_sem has been released (up_read()), unless flags had both 902 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in 903 * which case mmap_sem is still held. 904 * 905 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1 906 * with the page locked and the mmap_sem unperturbed. 907 */ 908 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, 909 unsigned int flags) 910 { 911 if (flags & FAULT_FLAG_ALLOW_RETRY) { 912 /* 913 * CAUTION! In this case, mmap_sem is not released 914 * even though return 0. 915 */ 916 if (flags & FAULT_FLAG_RETRY_NOWAIT) 917 return 0; 918 919 up_read(&mm->mmap_sem); 920 if (flags & FAULT_FLAG_KILLABLE) 921 wait_on_page_locked_killable(page); 922 else 923 wait_on_page_locked(page); 924 return 0; 925 } else { 926 if (flags & FAULT_FLAG_KILLABLE) { 927 int ret; 928 929 ret = __lock_page_killable(page); 930 if (ret) { 931 up_read(&mm->mmap_sem); 932 return 0; 933 } 934 } else 935 __lock_page(page); 936 return 1; 937 } 938 } 939 940 /** 941 * page_cache_next_hole - find the next hole (not-present entry) 942 * @mapping: mapping 943 * @index: index 944 * @max_scan: maximum range to search 945 * 946 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the 947 * lowest indexed hole. 948 * 949 * Returns: the index of the hole if found, otherwise returns an index 950 * outside of the set specified (in which case 'return - index >= 951 * max_scan' will be true). In rare cases of index wrap-around, 0 will 952 * be returned. 953 * 954 * page_cache_next_hole may be called under rcu_read_lock. However, 955 * like radix_tree_gang_lookup, this will not atomically search a 956 * snapshot of the tree at a single point in time. For example, if a 957 * hole is created at index 5, then subsequently a hole is created at 958 * index 10, page_cache_next_hole covering both indexes may return 10 959 * if called under rcu_read_lock. 960 */ 961 pgoff_t page_cache_next_hole(struct address_space *mapping, 962 pgoff_t index, unsigned long max_scan) 963 { 964 unsigned long i; 965 966 for (i = 0; i < max_scan; i++) { 967 struct page *page; 968 969 page = radix_tree_lookup(&mapping->page_tree, index); 970 if (!page || radix_tree_exceptional_entry(page)) 971 break; 972 index++; 973 if (index == 0) 974 break; 975 } 976 977 return index; 978 } 979 EXPORT_SYMBOL(page_cache_next_hole); 980 981 /** 982 * page_cache_prev_hole - find the prev hole (not-present entry) 983 * @mapping: mapping 984 * @index: index 985 * @max_scan: maximum range to search 986 * 987 * Search backwards in the range [max(index-max_scan+1, 0), index] for 988 * the first hole. 989 * 990 * Returns: the index of the hole if found, otherwise returns an index 991 * outside of the set specified (in which case 'index - return >= 992 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX 993 * will be returned. 994 * 995 * page_cache_prev_hole may be called under rcu_read_lock. However, 996 * like radix_tree_gang_lookup, this will not atomically search a 997 * snapshot of the tree at a single point in time. For example, if a 998 * hole is created at index 10, then subsequently a hole is created at 999 * index 5, page_cache_prev_hole covering both indexes may return 5 if 1000 * called under rcu_read_lock. 1001 */ 1002 pgoff_t page_cache_prev_hole(struct address_space *mapping, 1003 pgoff_t index, unsigned long max_scan) 1004 { 1005 unsigned long i; 1006 1007 for (i = 0; i < max_scan; i++) { 1008 struct page *page; 1009 1010 page = radix_tree_lookup(&mapping->page_tree, index); 1011 if (!page || radix_tree_exceptional_entry(page)) 1012 break; 1013 index--; 1014 if (index == ULONG_MAX) 1015 break; 1016 } 1017 1018 return index; 1019 } 1020 EXPORT_SYMBOL(page_cache_prev_hole); 1021 1022 /** 1023 * find_get_entry - find and get a page cache entry 1024 * @mapping: the address_space to search 1025 * @offset: the page cache index 1026 * 1027 * Looks up the page cache slot at @mapping & @offset. If there is a 1028 * page cache page, it is returned with an increased refcount. 1029 * 1030 * If the slot holds a shadow entry of a previously evicted page, or a 1031 * swap entry from shmem/tmpfs, it is returned. 1032 * 1033 * Otherwise, %NULL is returned. 1034 */ 1035 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) 1036 { 1037 void **pagep; 1038 struct page *head, *page; 1039 1040 rcu_read_lock(); 1041 repeat: 1042 page = NULL; 1043 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); 1044 if (pagep) { 1045 page = radix_tree_deref_slot(pagep); 1046 if (unlikely(!page)) 1047 goto out; 1048 if (radix_tree_exception(page)) { 1049 if (radix_tree_deref_retry(page)) 1050 goto repeat; 1051 /* 1052 * A shadow entry of a recently evicted page, 1053 * or a swap entry from shmem/tmpfs. Return 1054 * it without attempting to raise page count. 1055 */ 1056 goto out; 1057 } 1058 1059 head = compound_head(page); 1060 if (!page_cache_get_speculative(head)) 1061 goto repeat; 1062 1063 /* The page was split under us? */ 1064 if (compound_head(page) != head) { 1065 put_page(head); 1066 goto repeat; 1067 } 1068 1069 /* 1070 * Has the page moved? 1071 * This is part of the lockless pagecache protocol. See 1072 * include/linux/pagemap.h for details. 1073 */ 1074 if (unlikely(page != *pagep)) { 1075 put_page(head); 1076 goto repeat; 1077 } 1078 } 1079 out: 1080 rcu_read_unlock(); 1081 1082 return page; 1083 } 1084 EXPORT_SYMBOL(find_get_entry); 1085 1086 /** 1087 * find_lock_entry - locate, pin and lock a page cache entry 1088 * @mapping: the address_space to search 1089 * @offset: the page cache index 1090 * 1091 * Looks up the page cache slot at @mapping & @offset. If there is a 1092 * page cache page, it is returned locked and with an increased 1093 * refcount. 1094 * 1095 * If the slot holds a shadow entry of a previously evicted page, or a 1096 * swap entry from shmem/tmpfs, it is returned. 1097 * 1098 * Otherwise, %NULL is returned. 1099 * 1100 * find_lock_entry() may sleep. 1101 */ 1102 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) 1103 { 1104 struct page *page; 1105 1106 repeat: 1107 page = find_get_entry(mapping, offset); 1108 if (page && !radix_tree_exception(page)) { 1109 lock_page(page); 1110 /* Has the page been truncated? */ 1111 if (unlikely(page_mapping(page) != mapping)) { 1112 unlock_page(page); 1113 put_page(page); 1114 goto repeat; 1115 } 1116 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); 1117 } 1118 return page; 1119 } 1120 EXPORT_SYMBOL(find_lock_entry); 1121 1122 /** 1123 * pagecache_get_page - find and get a page reference 1124 * @mapping: the address_space to search 1125 * @offset: the page index 1126 * @fgp_flags: PCG flags 1127 * @gfp_mask: gfp mask to use for the page cache data page allocation 1128 * 1129 * Looks up the page cache slot at @mapping & @offset. 1130 * 1131 * PCG flags modify how the page is returned. 1132 * 1133 * FGP_ACCESSED: the page will be marked accessed 1134 * FGP_LOCK: Page is return locked 1135 * FGP_CREAT: If page is not present then a new page is allocated using 1136 * @gfp_mask and added to the page cache and the VM's LRU 1137 * list. The page is returned locked and with an increased 1138 * refcount. Otherwise, %NULL is returned. 1139 * 1140 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even 1141 * if the GFP flags specified for FGP_CREAT are atomic. 1142 * 1143 * If there is a page cache page, it is returned with an increased refcount. 1144 */ 1145 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 1146 int fgp_flags, gfp_t gfp_mask) 1147 { 1148 struct page *page; 1149 1150 repeat: 1151 page = find_get_entry(mapping, offset); 1152 if (radix_tree_exceptional_entry(page)) 1153 page = NULL; 1154 if (!page) 1155 goto no_page; 1156 1157 if (fgp_flags & FGP_LOCK) { 1158 if (fgp_flags & FGP_NOWAIT) { 1159 if (!trylock_page(page)) { 1160 put_page(page); 1161 return NULL; 1162 } 1163 } else { 1164 lock_page(page); 1165 } 1166 1167 /* Has the page been truncated? */ 1168 if (unlikely(page->mapping != mapping)) { 1169 unlock_page(page); 1170 put_page(page); 1171 goto repeat; 1172 } 1173 VM_BUG_ON_PAGE(page->index != offset, page); 1174 } 1175 1176 if (page && (fgp_flags & FGP_ACCESSED)) 1177 mark_page_accessed(page); 1178 1179 no_page: 1180 if (!page && (fgp_flags & FGP_CREAT)) { 1181 int err; 1182 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) 1183 gfp_mask |= __GFP_WRITE; 1184 if (fgp_flags & FGP_NOFS) 1185 gfp_mask &= ~__GFP_FS; 1186 1187 page = __page_cache_alloc(gfp_mask); 1188 if (!page) 1189 return NULL; 1190 1191 if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK))) 1192 fgp_flags |= FGP_LOCK; 1193 1194 /* Init accessed so avoid atomic mark_page_accessed later */ 1195 if (fgp_flags & FGP_ACCESSED) 1196 __SetPageReferenced(page); 1197 1198 err = add_to_page_cache_lru(page, mapping, offset, 1199 gfp_mask & GFP_RECLAIM_MASK); 1200 if (unlikely(err)) { 1201 put_page(page); 1202 page = NULL; 1203 if (err == -EEXIST) 1204 goto repeat; 1205 } 1206 } 1207 1208 return page; 1209 } 1210 EXPORT_SYMBOL(pagecache_get_page); 1211 1212 /** 1213 * find_get_entries - gang pagecache lookup 1214 * @mapping: The address_space to search 1215 * @start: The starting page cache index 1216 * @nr_entries: The maximum number of entries 1217 * @entries: Where the resulting entries are placed 1218 * @indices: The cache indices corresponding to the entries in @entries 1219 * 1220 * find_get_entries() will search for and return a group of up to 1221 * @nr_entries entries in the mapping. The entries are placed at 1222 * @entries. find_get_entries() takes a reference against any actual 1223 * pages it returns. 1224 * 1225 * The search returns a group of mapping-contiguous page cache entries 1226 * with ascending indexes. There may be holes in the indices due to 1227 * not-present pages. 1228 * 1229 * Any shadow entries of evicted pages, or swap entries from 1230 * shmem/tmpfs, are included in the returned array. 1231 * 1232 * find_get_entries() returns the number of pages and shadow entries 1233 * which were found. 1234 */ 1235 unsigned find_get_entries(struct address_space *mapping, 1236 pgoff_t start, unsigned int nr_entries, 1237 struct page **entries, pgoff_t *indices) 1238 { 1239 void **slot; 1240 unsigned int ret = 0; 1241 struct radix_tree_iter iter; 1242 1243 if (!nr_entries) 1244 return 0; 1245 1246 rcu_read_lock(); 1247 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 1248 struct page *head, *page; 1249 repeat: 1250 page = radix_tree_deref_slot(slot); 1251 if (unlikely(!page)) 1252 continue; 1253 if (radix_tree_exception(page)) { 1254 if (radix_tree_deref_retry(page)) { 1255 slot = radix_tree_iter_retry(&iter); 1256 continue; 1257 } 1258 /* 1259 * A shadow entry of a recently evicted page, a swap 1260 * entry from shmem/tmpfs or a DAX entry. Return it 1261 * without attempting to raise page count. 1262 */ 1263 goto export; 1264 } 1265 1266 head = compound_head(page); 1267 if (!page_cache_get_speculative(head)) 1268 goto repeat; 1269 1270 /* The page was split under us? */ 1271 if (compound_head(page) != head) { 1272 put_page(head); 1273 goto repeat; 1274 } 1275 1276 /* Has the page moved? */ 1277 if (unlikely(page != *slot)) { 1278 put_page(head); 1279 goto repeat; 1280 } 1281 export: 1282 indices[ret] = iter.index; 1283 entries[ret] = page; 1284 if (++ret == nr_entries) 1285 break; 1286 } 1287 rcu_read_unlock(); 1288 return ret; 1289 } 1290 1291 /** 1292 * find_get_pages - gang pagecache lookup 1293 * @mapping: The address_space to search 1294 * @start: The starting page index 1295 * @nr_pages: The maximum number of pages 1296 * @pages: Where the resulting pages are placed 1297 * 1298 * find_get_pages() will search for and return a group of up to 1299 * @nr_pages pages in the mapping. The pages are placed at @pages. 1300 * find_get_pages() takes a reference against the returned pages. 1301 * 1302 * The search returns a group of mapping-contiguous pages with ascending 1303 * indexes. There may be holes in the indices due to not-present pages. 1304 * 1305 * find_get_pages() returns the number of pages which were found. 1306 */ 1307 unsigned find_get_pages(struct address_space *mapping, pgoff_t start, 1308 unsigned int nr_pages, struct page **pages) 1309 { 1310 struct radix_tree_iter iter; 1311 void **slot; 1312 unsigned ret = 0; 1313 1314 if (unlikely(!nr_pages)) 1315 return 0; 1316 1317 rcu_read_lock(); 1318 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 1319 struct page *head, *page; 1320 repeat: 1321 page = radix_tree_deref_slot(slot); 1322 if (unlikely(!page)) 1323 continue; 1324 1325 if (radix_tree_exception(page)) { 1326 if (radix_tree_deref_retry(page)) { 1327 slot = radix_tree_iter_retry(&iter); 1328 continue; 1329 } 1330 /* 1331 * A shadow entry of a recently evicted page, 1332 * or a swap entry from shmem/tmpfs. Skip 1333 * over it. 1334 */ 1335 continue; 1336 } 1337 1338 head = compound_head(page); 1339 if (!page_cache_get_speculative(head)) 1340 goto repeat; 1341 1342 /* The page was split under us? */ 1343 if (compound_head(page) != head) { 1344 put_page(head); 1345 goto repeat; 1346 } 1347 1348 /* Has the page moved? */ 1349 if (unlikely(page != *slot)) { 1350 put_page(head); 1351 goto repeat; 1352 } 1353 1354 pages[ret] = page; 1355 if (++ret == nr_pages) 1356 break; 1357 } 1358 1359 rcu_read_unlock(); 1360 return ret; 1361 } 1362 1363 /** 1364 * find_get_pages_contig - gang contiguous pagecache lookup 1365 * @mapping: The address_space to search 1366 * @index: The starting page index 1367 * @nr_pages: The maximum number of pages 1368 * @pages: Where the resulting pages are placed 1369 * 1370 * find_get_pages_contig() works exactly like find_get_pages(), except 1371 * that the returned number of pages are guaranteed to be contiguous. 1372 * 1373 * find_get_pages_contig() returns the number of pages which were found. 1374 */ 1375 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, 1376 unsigned int nr_pages, struct page **pages) 1377 { 1378 struct radix_tree_iter iter; 1379 void **slot; 1380 unsigned int ret = 0; 1381 1382 if (unlikely(!nr_pages)) 1383 return 0; 1384 1385 rcu_read_lock(); 1386 radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { 1387 struct page *head, *page; 1388 repeat: 1389 page = radix_tree_deref_slot(slot); 1390 /* The hole, there no reason to continue */ 1391 if (unlikely(!page)) 1392 break; 1393 1394 if (radix_tree_exception(page)) { 1395 if (radix_tree_deref_retry(page)) { 1396 slot = radix_tree_iter_retry(&iter); 1397 continue; 1398 } 1399 /* 1400 * A shadow entry of a recently evicted page, 1401 * or a swap entry from shmem/tmpfs. Stop 1402 * looking for contiguous pages. 1403 */ 1404 break; 1405 } 1406 1407 head = compound_head(page); 1408 if (!page_cache_get_speculative(head)) 1409 goto repeat; 1410 1411 /* The page was split under us? */ 1412 if (compound_head(page) != head) { 1413 put_page(head); 1414 goto repeat; 1415 } 1416 1417 /* Has the page moved? */ 1418 if (unlikely(page != *slot)) { 1419 put_page(head); 1420 goto repeat; 1421 } 1422 1423 /* 1424 * must check mapping and index after taking the ref. 1425 * otherwise we can get both false positives and false 1426 * negatives, which is just confusing to the caller. 1427 */ 1428 if (page->mapping == NULL || page_to_pgoff(page) != iter.index) { 1429 put_page(page); 1430 break; 1431 } 1432 1433 pages[ret] = page; 1434 if (++ret == nr_pages) 1435 break; 1436 } 1437 rcu_read_unlock(); 1438 return ret; 1439 } 1440 EXPORT_SYMBOL(find_get_pages_contig); 1441 1442 /** 1443 * find_get_pages_tag - find and return pages that match @tag 1444 * @mapping: the address_space to search 1445 * @index: the starting page index 1446 * @tag: the tag index 1447 * @nr_pages: the maximum number of pages 1448 * @pages: where the resulting pages are placed 1449 * 1450 * Like find_get_pages, except we only return pages which are tagged with 1451 * @tag. We update @index to index the next page for the traversal. 1452 */ 1453 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, 1454 int tag, unsigned int nr_pages, struct page **pages) 1455 { 1456 struct radix_tree_iter iter; 1457 void **slot; 1458 unsigned ret = 0; 1459 1460 if (unlikely(!nr_pages)) 1461 return 0; 1462 1463 rcu_read_lock(); 1464 radix_tree_for_each_tagged(slot, &mapping->page_tree, 1465 &iter, *index, tag) { 1466 struct page *head, *page; 1467 repeat: 1468 page = radix_tree_deref_slot(slot); 1469 if (unlikely(!page)) 1470 continue; 1471 1472 if (radix_tree_exception(page)) { 1473 if (radix_tree_deref_retry(page)) { 1474 slot = radix_tree_iter_retry(&iter); 1475 continue; 1476 } 1477 /* 1478 * A shadow entry of a recently evicted page. 1479 * 1480 * Those entries should never be tagged, but 1481 * this tree walk is lockless and the tags are 1482 * looked up in bulk, one radix tree node at a 1483 * time, so there is a sizable window for page 1484 * reclaim to evict a page we saw tagged. 1485 * 1486 * Skip over it. 1487 */ 1488 continue; 1489 } 1490 1491 head = compound_head(page); 1492 if (!page_cache_get_speculative(head)) 1493 goto repeat; 1494 1495 /* The page was split under us? */ 1496 if (compound_head(page) != head) { 1497 put_page(head); 1498 goto repeat; 1499 } 1500 1501 /* Has the page moved? */ 1502 if (unlikely(page != *slot)) { 1503 put_page(head); 1504 goto repeat; 1505 } 1506 1507 pages[ret] = page; 1508 if (++ret == nr_pages) 1509 break; 1510 } 1511 1512 rcu_read_unlock(); 1513 1514 if (ret) 1515 *index = pages[ret - 1]->index + 1; 1516 1517 return ret; 1518 } 1519 EXPORT_SYMBOL(find_get_pages_tag); 1520 1521 /** 1522 * find_get_entries_tag - find and return entries that match @tag 1523 * @mapping: the address_space to search 1524 * @start: the starting page cache index 1525 * @tag: the tag index 1526 * @nr_entries: the maximum number of entries 1527 * @entries: where the resulting entries are placed 1528 * @indices: the cache indices corresponding to the entries in @entries 1529 * 1530 * Like find_get_entries, except we only return entries which are tagged with 1531 * @tag. 1532 */ 1533 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, 1534 int tag, unsigned int nr_entries, 1535 struct page **entries, pgoff_t *indices) 1536 { 1537 void **slot; 1538 unsigned int ret = 0; 1539 struct radix_tree_iter iter; 1540 1541 if (!nr_entries) 1542 return 0; 1543 1544 rcu_read_lock(); 1545 radix_tree_for_each_tagged(slot, &mapping->page_tree, 1546 &iter, start, tag) { 1547 struct page *head, *page; 1548 repeat: 1549 page = radix_tree_deref_slot(slot); 1550 if (unlikely(!page)) 1551 continue; 1552 if (radix_tree_exception(page)) { 1553 if (radix_tree_deref_retry(page)) { 1554 slot = radix_tree_iter_retry(&iter); 1555 continue; 1556 } 1557 1558 /* 1559 * A shadow entry of a recently evicted page, a swap 1560 * entry from shmem/tmpfs or a DAX entry. Return it 1561 * without attempting to raise page count. 1562 */ 1563 goto export; 1564 } 1565 1566 head = compound_head(page); 1567 if (!page_cache_get_speculative(head)) 1568 goto repeat; 1569 1570 /* The page was split under us? */ 1571 if (compound_head(page) != head) { 1572 put_page(head); 1573 goto repeat; 1574 } 1575 1576 /* Has the page moved? */ 1577 if (unlikely(page != *slot)) { 1578 put_page(head); 1579 goto repeat; 1580 } 1581 export: 1582 indices[ret] = iter.index; 1583 entries[ret] = page; 1584 if (++ret == nr_entries) 1585 break; 1586 } 1587 rcu_read_unlock(); 1588 return ret; 1589 } 1590 EXPORT_SYMBOL(find_get_entries_tag); 1591 1592 /* 1593 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 1594 * a _large_ part of the i/o request. Imagine the worst scenario: 1595 * 1596 * ---R__________________________________________B__________ 1597 * ^ reading here ^ bad block(assume 4k) 1598 * 1599 * read(R) => miss => readahead(R...B) => media error => frustrating retries 1600 * => failing the whole request => read(R) => read(R+1) => 1601 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 1602 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 1603 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 1604 * 1605 * It is going insane. Fix it by quickly scaling down the readahead size. 1606 */ 1607 static void shrink_readahead_size_eio(struct file *filp, 1608 struct file_ra_state *ra) 1609 { 1610 ra->ra_pages /= 4; 1611 } 1612 1613 /** 1614 * do_generic_file_read - generic file read routine 1615 * @filp: the file to read 1616 * @ppos: current file position 1617 * @iter: data destination 1618 * @written: already copied 1619 * 1620 * This is a generic file read routine, and uses the 1621 * mapping->a_ops->readpage() function for the actual low-level stuff. 1622 * 1623 * This is really ugly. But the goto's actually try to clarify some 1624 * of the logic when it comes to error handling etc. 1625 */ 1626 static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos, 1627 struct iov_iter *iter, ssize_t written) 1628 { 1629 struct address_space *mapping = filp->f_mapping; 1630 struct inode *inode = mapping->host; 1631 struct file_ra_state *ra = &filp->f_ra; 1632 pgoff_t index; 1633 pgoff_t last_index; 1634 pgoff_t prev_index; 1635 unsigned long offset; /* offset into pagecache page */ 1636 unsigned int prev_offset; 1637 int error = 0; 1638 1639 if (unlikely(*ppos >= inode->i_sb->s_maxbytes)) 1640 return 0; 1641 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); 1642 1643 index = *ppos >> PAGE_SHIFT; 1644 prev_index = ra->prev_pos >> PAGE_SHIFT; 1645 prev_offset = ra->prev_pos & (PAGE_SIZE-1); 1646 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT; 1647 offset = *ppos & ~PAGE_MASK; 1648 1649 for (;;) { 1650 struct page *page; 1651 pgoff_t end_index; 1652 loff_t isize; 1653 unsigned long nr, ret; 1654 1655 cond_resched(); 1656 find_page: 1657 page = find_get_page(mapping, index); 1658 if (!page) { 1659 page_cache_sync_readahead(mapping, 1660 ra, filp, 1661 index, last_index - index); 1662 page = find_get_page(mapping, index); 1663 if (unlikely(page == NULL)) 1664 goto no_cached_page; 1665 } 1666 if (PageReadahead(page)) { 1667 page_cache_async_readahead(mapping, 1668 ra, filp, page, 1669 index, last_index - index); 1670 } 1671 if (!PageUptodate(page)) { 1672 /* 1673 * See comment in do_read_cache_page on why 1674 * wait_on_page_locked is used to avoid unnecessarily 1675 * serialisations and why it's safe. 1676 */ 1677 error = wait_on_page_locked_killable(page); 1678 if (unlikely(error)) 1679 goto readpage_error; 1680 if (PageUptodate(page)) 1681 goto page_ok; 1682 1683 if (inode->i_blkbits == PAGE_SHIFT || 1684 !mapping->a_ops->is_partially_uptodate) 1685 goto page_not_up_to_date; 1686 /* pipes can't handle partially uptodate pages */ 1687 if (unlikely(iter->type & ITER_PIPE)) 1688 goto page_not_up_to_date; 1689 if (!trylock_page(page)) 1690 goto page_not_up_to_date; 1691 /* Did it get truncated before we got the lock? */ 1692 if (!page->mapping) 1693 goto page_not_up_to_date_locked; 1694 if (!mapping->a_ops->is_partially_uptodate(page, 1695 offset, iter->count)) 1696 goto page_not_up_to_date_locked; 1697 unlock_page(page); 1698 } 1699 page_ok: 1700 /* 1701 * i_size must be checked after we know the page is Uptodate. 1702 * 1703 * Checking i_size after the check allows us to calculate 1704 * the correct value for "nr", which means the zero-filled 1705 * part of the page is not copied back to userspace (unless 1706 * another truncate extends the file - this is desired though). 1707 */ 1708 1709 isize = i_size_read(inode); 1710 end_index = (isize - 1) >> PAGE_SHIFT; 1711 if (unlikely(!isize || index > end_index)) { 1712 put_page(page); 1713 goto out; 1714 } 1715 1716 /* nr is the maximum number of bytes to copy from this page */ 1717 nr = PAGE_SIZE; 1718 if (index == end_index) { 1719 nr = ((isize - 1) & ~PAGE_MASK) + 1; 1720 if (nr <= offset) { 1721 put_page(page); 1722 goto out; 1723 } 1724 } 1725 nr = nr - offset; 1726 1727 /* If users can be writing to this page using arbitrary 1728 * virtual addresses, take care about potential aliasing 1729 * before reading the page on the kernel side. 1730 */ 1731 if (mapping_writably_mapped(mapping)) 1732 flush_dcache_page(page); 1733 1734 /* 1735 * When a sequential read accesses a page several times, 1736 * only mark it as accessed the first time. 1737 */ 1738 if (prev_index != index || offset != prev_offset) 1739 mark_page_accessed(page); 1740 prev_index = index; 1741 1742 /* 1743 * Ok, we have the page, and it's up-to-date, so 1744 * now we can copy it to user space... 1745 */ 1746 1747 ret = copy_page_to_iter(page, offset, nr, iter); 1748 offset += ret; 1749 index += offset >> PAGE_SHIFT; 1750 offset &= ~PAGE_MASK; 1751 prev_offset = offset; 1752 1753 put_page(page); 1754 written += ret; 1755 if (!iov_iter_count(iter)) 1756 goto out; 1757 if (ret < nr) { 1758 error = -EFAULT; 1759 goto out; 1760 } 1761 continue; 1762 1763 page_not_up_to_date: 1764 /* Get exclusive access to the page ... */ 1765 error = lock_page_killable(page); 1766 if (unlikely(error)) 1767 goto readpage_error; 1768 1769 page_not_up_to_date_locked: 1770 /* Did it get truncated before we got the lock? */ 1771 if (!page->mapping) { 1772 unlock_page(page); 1773 put_page(page); 1774 continue; 1775 } 1776 1777 /* Did somebody else fill it already? */ 1778 if (PageUptodate(page)) { 1779 unlock_page(page); 1780 goto page_ok; 1781 } 1782 1783 readpage: 1784 /* 1785 * A previous I/O error may have been due to temporary 1786 * failures, eg. multipath errors. 1787 * PG_error will be set again if readpage fails. 1788 */ 1789 ClearPageError(page); 1790 /* Start the actual read. The read will unlock the page. */ 1791 error = mapping->a_ops->readpage(filp, page); 1792 1793 if (unlikely(error)) { 1794 if (error == AOP_TRUNCATED_PAGE) { 1795 put_page(page); 1796 error = 0; 1797 goto find_page; 1798 } 1799 goto readpage_error; 1800 } 1801 1802 if (!PageUptodate(page)) { 1803 error = lock_page_killable(page); 1804 if (unlikely(error)) 1805 goto readpage_error; 1806 if (!PageUptodate(page)) { 1807 if (page->mapping == NULL) { 1808 /* 1809 * invalidate_mapping_pages got it 1810 */ 1811 unlock_page(page); 1812 put_page(page); 1813 goto find_page; 1814 } 1815 unlock_page(page); 1816 shrink_readahead_size_eio(filp, ra); 1817 error = -EIO; 1818 goto readpage_error; 1819 } 1820 unlock_page(page); 1821 } 1822 1823 goto page_ok; 1824 1825 readpage_error: 1826 /* UHHUH! A synchronous read error occurred. Report it */ 1827 put_page(page); 1828 goto out; 1829 1830 no_cached_page: 1831 /* 1832 * Ok, it wasn't cached, so we need to create a new 1833 * page.. 1834 */ 1835 page = page_cache_alloc_cold(mapping); 1836 if (!page) { 1837 error = -ENOMEM; 1838 goto out; 1839 } 1840 error = add_to_page_cache_lru(page, mapping, index, 1841 mapping_gfp_constraint(mapping, GFP_KERNEL)); 1842 if (error) { 1843 put_page(page); 1844 if (error == -EEXIST) { 1845 error = 0; 1846 goto find_page; 1847 } 1848 goto out; 1849 } 1850 goto readpage; 1851 } 1852 1853 out: 1854 ra->prev_pos = prev_index; 1855 ra->prev_pos <<= PAGE_SHIFT; 1856 ra->prev_pos |= prev_offset; 1857 1858 *ppos = ((loff_t)index << PAGE_SHIFT) + offset; 1859 file_accessed(filp); 1860 return written ? written : error; 1861 } 1862 1863 /** 1864 * generic_file_read_iter - generic filesystem read routine 1865 * @iocb: kernel I/O control block 1866 * @iter: destination for the data read 1867 * 1868 * This is the "read_iter()" routine for all filesystems 1869 * that can use the page cache directly. 1870 */ 1871 ssize_t 1872 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 1873 { 1874 struct file *file = iocb->ki_filp; 1875 ssize_t retval = 0; 1876 size_t count = iov_iter_count(iter); 1877 1878 if (!count) 1879 goto out; /* skip atime */ 1880 1881 if (iocb->ki_flags & IOCB_DIRECT) { 1882 struct address_space *mapping = file->f_mapping; 1883 struct inode *inode = mapping->host; 1884 struct iov_iter data = *iter; 1885 loff_t size; 1886 1887 size = i_size_read(inode); 1888 retval = filemap_write_and_wait_range(mapping, iocb->ki_pos, 1889 iocb->ki_pos + count - 1); 1890 if (retval < 0) 1891 goto out; 1892 1893 file_accessed(file); 1894 1895 retval = mapping->a_ops->direct_IO(iocb, &data); 1896 if (retval >= 0) { 1897 iocb->ki_pos += retval; 1898 iov_iter_advance(iter, retval); 1899 } 1900 1901 /* 1902 * Btrfs can have a short DIO read if we encounter 1903 * compressed extents, so if there was an error, or if 1904 * we've already read everything we wanted to, or if 1905 * there was a short read because we hit EOF, go ahead 1906 * and return. Otherwise fallthrough to buffered io for 1907 * the rest of the read. Buffered reads will not work for 1908 * DAX files, so don't bother trying. 1909 */ 1910 if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size || 1911 IS_DAX(inode)) 1912 goto out; 1913 } 1914 1915 retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval); 1916 out: 1917 return retval; 1918 } 1919 EXPORT_SYMBOL(generic_file_read_iter); 1920 1921 #ifdef CONFIG_MMU 1922 /** 1923 * page_cache_read - adds requested page to the page cache if not already there 1924 * @file: file to read 1925 * @offset: page index 1926 * @gfp_mask: memory allocation flags 1927 * 1928 * This adds the requested page to the page cache if it isn't already there, 1929 * and schedules an I/O to read in its contents from disk. 1930 */ 1931 static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) 1932 { 1933 struct address_space *mapping = file->f_mapping; 1934 struct page *page; 1935 int ret; 1936 1937 do { 1938 page = __page_cache_alloc(gfp_mask|__GFP_COLD); 1939 if (!page) 1940 return -ENOMEM; 1941 1942 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); 1943 if (ret == 0) 1944 ret = mapping->a_ops->readpage(file, page); 1945 else if (ret == -EEXIST) 1946 ret = 0; /* losing race to add is OK */ 1947 1948 put_page(page); 1949 1950 } while (ret == AOP_TRUNCATED_PAGE); 1951 1952 return ret; 1953 } 1954 1955 #define MMAP_LOTSAMISS (100) 1956 1957 /* 1958 * Synchronous readahead happens when we don't even find 1959 * a page in the page cache at all. 1960 */ 1961 static void do_sync_mmap_readahead(struct vm_area_struct *vma, 1962 struct file_ra_state *ra, 1963 struct file *file, 1964 pgoff_t offset) 1965 { 1966 struct address_space *mapping = file->f_mapping; 1967 1968 /* If we don't want any read-ahead, don't bother */ 1969 if (vma->vm_flags & VM_RAND_READ) 1970 return; 1971 if (!ra->ra_pages) 1972 return; 1973 1974 if (vma->vm_flags & VM_SEQ_READ) { 1975 page_cache_sync_readahead(mapping, ra, file, offset, 1976 ra->ra_pages); 1977 return; 1978 } 1979 1980 /* Avoid banging the cache line if not needed */ 1981 if (ra->mmap_miss < MMAP_LOTSAMISS * 10) 1982 ra->mmap_miss++; 1983 1984 /* 1985 * Do we miss much more than hit in this file? If so, 1986 * stop bothering with read-ahead. It will only hurt. 1987 */ 1988 if (ra->mmap_miss > MMAP_LOTSAMISS) 1989 return; 1990 1991 /* 1992 * mmap read-around 1993 */ 1994 ra->start = max_t(long, 0, offset - ra->ra_pages / 2); 1995 ra->size = ra->ra_pages; 1996 ra->async_size = ra->ra_pages / 4; 1997 ra_submit(ra, mapping, file); 1998 } 1999 2000 /* 2001 * Asynchronous readahead happens when we find the page and PG_readahead, 2002 * so we want to possibly extend the readahead further.. 2003 */ 2004 static void do_async_mmap_readahead(struct vm_area_struct *vma, 2005 struct file_ra_state *ra, 2006 struct file *file, 2007 struct page *page, 2008 pgoff_t offset) 2009 { 2010 struct address_space *mapping = file->f_mapping; 2011 2012 /* If we don't want any read-ahead, don't bother */ 2013 if (vma->vm_flags & VM_RAND_READ) 2014 return; 2015 if (ra->mmap_miss > 0) 2016 ra->mmap_miss--; 2017 if (PageReadahead(page)) 2018 page_cache_async_readahead(mapping, ra, file, 2019 page, offset, ra->ra_pages); 2020 } 2021 2022 /** 2023 * filemap_fault - read in file data for page fault handling 2024 * @vma: vma in which the fault was taken 2025 * @vmf: struct vm_fault containing details of the fault 2026 * 2027 * filemap_fault() is invoked via the vma operations vector for a 2028 * mapped memory region to read in file data during a page fault. 2029 * 2030 * The goto's are kind of ugly, but this streamlines the normal case of having 2031 * it in the page cache, and handles the special cases reasonably without 2032 * having a lot of duplicated code. 2033 * 2034 * vma->vm_mm->mmap_sem must be held on entry. 2035 * 2036 * If our return value has VM_FAULT_RETRY set, it's because 2037 * lock_page_or_retry() returned 0. 2038 * The mmap_sem has usually been released in this case. 2039 * See __lock_page_or_retry() for the exception. 2040 * 2041 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem 2042 * has not been released. 2043 * 2044 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 2045 */ 2046 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2047 { 2048 int error; 2049 struct file *file = vma->vm_file; 2050 struct address_space *mapping = file->f_mapping; 2051 struct file_ra_state *ra = &file->f_ra; 2052 struct inode *inode = mapping->host; 2053 pgoff_t offset = vmf->pgoff; 2054 struct page *page; 2055 loff_t size; 2056 int ret = 0; 2057 2058 size = round_up(i_size_read(inode), PAGE_SIZE); 2059 if (offset >= size >> PAGE_SHIFT) 2060 return VM_FAULT_SIGBUS; 2061 2062 /* 2063 * Do we have something in the page cache already? 2064 */ 2065 page = find_get_page(mapping, offset); 2066 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { 2067 /* 2068 * We found the page, so try async readahead before 2069 * waiting for the lock. 2070 */ 2071 do_async_mmap_readahead(vma, ra, file, page, offset); 2072 } else if (!page) { 2073 /* No page in the page cache at all */ 2074 do_sync_mmap_readahead(vma, ra, file, offset); 2075 count_vm_event(PGMAJFAULT); 2076 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); 2077 ret = VM_FAULT_MAJOR; 2078 retry_find: 2079 page = find_get_page(mapping, offset); 2080 if (!page) 2081 goto no_cached_page; 2082 } 2083 2084 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { 2085 put_page(page); 2086 return ret | VM_FAULT_RETRY; 2087 } 2088 2089 /* Did it get truncated? */ 2090 if (unlikely(page->mapping != mapping)) { 2091 unlock_page(page); 2092 put_page(page); 2093 goto retry_find; 2094 } 2095 VM_BUG_ON_PAGE(page->index != offset, page); 2096 2097 /* 2098 * We have a locked page in the page cache, now we need to check 2099 * that it's up-to-date. If not, it is going to be due to an error. 2100 */ 2101 if (unlikely(!PageUptodate(page))) 2102 goto page_not_uptodate; 2103 2104 /* 2105 * Found the page and have a reference on it. 2106 * We must recheck i_size under page lock. 2107 */ 2108 size = round_up(i_size_read(inode), PAGE_SIZE); 2109 if (unlikely(offset >= size >> PAGE_SHIFT)) { 2110 unlock_page(page); 2111 put_page(page); 2112 return VM_FAULT_SIGBUS; 2113 } 2114 2115 vmf->page = page; 2116 return ret | VM_FAULT_LOCKED; 2117 2118 no_cached_page: 2119 /* 2120 * We're only likely to ever get here if MADV_RANDOM is in 2121 * effect. 2122 */ 2123 error = page_cache_read(file, offset, vmf->gfp_mask); 2124 2125 /* 2126 * The page we want has now been added to the page cache. 2127 * In the unlikely event that someone removed it in the 2128 * meantime, we'll just come back here and read it again. 2129 */ 2130 if (error >= 0) 2131 goto retry_find; 2132 2133 /* 2134 * An error return from page_cache_read can result if the 2135 * system is low on memory, or a problem occurs while trying 2136 * to schedule I/O. 2137 */ 2138 if (error == -ENOMEM) 2139 return VM_FAULT_OOM; 2140 return VM_FAULT_SIGBUS; 2141 2142 page_not_uptodate: 2143 /* 2144 * Umm, take care of errors if the page isn't up-to-date. 2145 * Try to re-read it _once_. We do this synchronously, 2146 * because there really aren't any performance issues here 2147 * and we need to check for errors. 2148 */ 2149 ClearPageError(page); 2150 error = mapping->a_ops->readpage(file, page); 2151 if (!error) { 2152 wait_on_page_locked(page); 2153 if (!PageUptodate(page)) 2154 error = -EIO; 2155 } 2156 put_page(page); 2157 2158 if (!error || error == AOP_TRUNCATED_PAGE) 2159 goto retry_find; 2160 2161 /* Things didn't work out. Return zero to tell the mm layer so. */ 2162 shrink_readahead_size_eio(file, ra); 2163 return VM_FAULT_SIGBUS; 2164 } 2165 EXPORT_SYMBOL(filemap_fault); 2166 2167 void filemap_map_pages(struct vm_fault *vmf, 2168 pgoff_t start_pgoff, pgoff_t end_pgoff) 2169 { 2170 struct radix_tree_iter iter; 2171 void **slot; 2172 struct file *file = vmf->vma->vm_file; 2173 struct address_space *mapping = file->f_mapping; 2174 pgoff_t last_pgoff = start_pgoff; 2175 loff_t size; 2176 struct page *head, *page; 2177 2178 rcu_read_lock(); 2179 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, 2180 start_pgoff) { 2181 if (iter.index > end_pgoff) 2182 break; 2183 repeat: 2184 page = radix_tree_deref_slot(slot); 2185 if (unlikely(!page)) 2186 goto next; 2187 if (radix_tree_exception(page)) { 2188 if (radix_tree_deref_retry(page)) { 2189 slot = radix_tree_iter_retry(&iter); 2190 continue; 2191 } 2192 goto next; 2193 } 2194 2195 head = compound_head(page); 2196 if (!page_cache_get_speculative(head)) 2197 goto repeat; 2198 2199 /* The page was split under us? */ 2200 if (compound_head(page) != head) { 2201 put_page(head); 2202 goto repeat; 2203 } 2204 2205 /* Has the page moved? */ 2206 if (unlikely(page != *slot)) { 2207 put_page(head); 2208 goto repeat; 2209 } 2210 2211 if (!PageUptodate(page) || 2212 PageReadahead(page) || 2213 PageHWPoison(page)) 2214 goto skip; 2215 if (!trylock_page(page)) 2216 goto skip; 2217 2218 if (page->mapping != mapping || !PageUptodate(page)) 2219 goto unlock; 2220 2221 size = round_up(i_size_read(mapping->host), PAGE_SIZE); 2222 if (page->index >= size >> PAGE_SHIFT) 2223 goto unlock; 2224 2225 if (file->f_ra.mmap_miss > 0) 2226 file->f_ra.mmap_miss--; 2227 2228 vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT; 2229 if (vmf->pte) 2230 vmf->pte += iter.index - last_pgoff; 2231 last_pgoff = iter.index; 2232 if (alloc_set_pte(vmf, NULL, page)) 2233 goto unlock; 2234 unlock_page(page); 2235 goto next; 2236 unlock: 2237 unlock_page(page); 2238 skip: 2239 put_page(page); 2240 next: 2241 /* Huge page is mapped? No need to proceed. */ 2242 if (pmd_trans_huge(*vmf->pmd)) 2243 break; 2244 if (iter.index == end_pgoff) 2245 break; 2246 } 2247 rcu_read_unlock(); 2248 } 2249 EXPORT_SYMBOL(filemap_map_pages); 2250 2251 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 2252 { 2253 struct page *page = vmf->page; 2254 struct inode *inode = file_inode(vma->vm_file); 2255 int ret = VM_FAULT_LOCKED; 2256 2257 sb_start_pagefault(inode->i_sb); 2258 file_update_time(vma->vm_file); 2259 lock_page(page); 2260 if (page->mapping != inode->i_mapping) { 2261 unlock_page(page); 2262 ret = VM_FAULT_NOPAGE; 2263 goto out; 2264 } 2265 /* 2266 * We mark the page dirty already here so that when freeze is in 2267 * progress, we are guaranteed that writeback during freezing will 2268 * see the dirty page and writeprotect it again. 2269 */ 2270 set_page_dirty(page); 2271 wait_for_stable_page(page); 2272 out: 2273 sb_end_pagefault(inode->i_sb); 2274 return ret; 2275 } 2276 EXPORT_SYMBOL(filemap_page_mkwrite); 2277 2278 const struct vm_operations_struct generic_file_vm_ops = { 2279 .fault = filemap_fault, 2280 .map_pages = filemap_map_pages, 2281 .page_mkwrite = filemap_page_mkwrite, 2282 }; 2283 2284 /* This is used for a general mmap of a disk file */ 2285 2286 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2287 { 2288 struct address_space *mapping = file->f_mapping; 2289 2290 if (!mapping->a_ops->readpage) 2291 return -ENOEXEC; 2292 file_accessed(file); 2293 vma->vm_ops = &generic_file_vm_ops; 2294 return 0; 2295 } 2296 2297 /* 2298 * This is for filesystems which do not implement ->writepage. 2299 */ 2300 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 2301 { 2302 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2303 return -EINVAL; 2304 return generic_file_mmap(file, vma); 2305 } 2306 #else 2307 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2308 { 2309 return -ENOSYS; 2310 } 2311 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 2312 { 2313 return -ENOSYS; 2314 } 2315 #endif /* CONFIG_MMU */ 2316 2317 EXPORT_SYMBOL(generic_file_mmap); 2318 EXPORT_SYMBOL(generic_file_readonly_mmap); 2319 2320 static struct page *wait_on_page_read(struct page *page) 2321 { 2322 if (!IS_ERR(page)) { 2323 wait_on_page_locked(page); 2324 if (!PageUptodate(page)) { 2325 put_page(page); 2326 page = ERR_PTR(-EIO); 2327 } 2328 } 2329 return page; 2330 } 2331 2332 static struct page *do_read_cache_page(struct address_space *mapping, 2333 pgoff_t index, 2334 int (*filler)(void *, struct page *), 2335 void *data, 2336 gfp_t gfp) 2337 { 2338 struct page *page; 2339 int err; 2340 repeat: 2341 page = find_get_page(mapping, index); 2342 if (!page) { 2343 page = __page_cache_alloc(gfp | __GFP_COLD); 2344 if (!page) 2345 return ERR_PTR(-ENOMEM); 2346 err = add_to_page_cache_lru(page, mapping, index, gfp); 2347 if (unlikely(err)) { 2348 put_page(page); 2349 if (err == -EEXIST) 2350 goto repeat; 2351 /* Presumably ENOMEM for radix tree node */ 2352 return ERR_PTR(err); 2353 } 2354 2355 filler: 2356 err = filler(data, page); 2357 if (err < 0) { 2358 put_page(page); 2359 return ERR_PTR(err); 2360 } 2361 2362 page = wait_on_page_read(page); 2363 if (IS_ERR(page)) 2364 return page; 2365 goto out; 2366 } 2367 if (PageUptodate(page)) 2368 goto out; 2369 2370 /* 2371 * Page is not up to date and may be locked due one of the following 2372 * case a: Page is being filled and the page lock is held 2373 * case b: Read/write error clearing the page uptodate status 2374 * case c: Truncation in progress (page locked) 2375 * case d: Reclaim in progress 2376 * 2377 * Case a, the page will be up to date when the page is unlocked. 2378 * There is no need to serialise on the page lock here as the page 2379 * is pinned so the lock gives no additional protection. Even if the 2380 * the page is truncated, the data is still valid if PageUptodate as 2381 * it's a race vs truncate race. 2382 * Case b, the page will not be up to date 2383 * Case c, the page may be truncated but in itself, the data may still 2384 * be valid after IO completes as it's a read vs truncate race. The 2385 * operation must restart if the page is not uptodate on unlock but 2386 * otherwise serialising on page lock to stabilise the mapping gives 2387 * no additional guarantees to the caller as the page lock is 2388 * released before return. 2389 * Case d, similar to truncation. If reclaim holds the page lock, it 2390 * will be a race with remove_mapping that determines if the mapping 2391 * is valid on unlock but otherwise the data is valid and there is 2392 * no need to serialise with page lock. 2393 * 2394 * As the page lock gives no additional guarantee, we optimistically 2395 * wait on the page to be unlocked and check if it's up to date and 2396 * use the page if it is. Otherwise, the page lock is required to 2397 * distinguish between the different cases. The motivation is that we 2398 * avoid spurious serialisations and wakeups when multiple processes 2399 * wait on the same page for IO to complete. 2400 */ 2401 wait_on_page_locked(page); 2402 if (PageUptodate(page)) 2403 goto out; 2404 2405 /* Distinguish between all the cases under the safety of the lock */ 2406 lock_page(page); 2407 2408 /* Case c or d, restart the operation */ 2409 if (!page->mapping) { 2410 unlock_page(page); 2411 put_page(page); 2412 goto repeat; 2413 } 2414 2415 /* Someone else locked and filled the page in a very small window */ 2416 if (PageUptodate(page)) { 2417 unlock_page(page); 2418 goto out; 2419 } 2420 goto filler; 2421 2422 out: 2423 mark_page_accessed(page); 2424 return page; 2425 } 2426 2427 /** 2428 * read_cache_page - read into page cache, fill it if needed 2429 * @mapping: the page's address_space 2430 * @index: the page index 2431 * @filler: function to perform the read 2432 * @data: first arg to filler(data, page) function, often left as NULL 2433 * 2434 * Read into the page cache. If a page already exists, and PageUptodate() is 2435 * not set, try to fill the page and wait for it to become unlocked. 2436 * 2437 * If the page does not get brought uptodate, return -EIO. 2438 */ 2439 struct page *read_cache_page(struct address_space *mapping, 2440 pgoff_t index, 2441 int (*filler)(void *, struct page *), 2442 void *data) 2443 { 2444 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); 2445 } 2446 EXPORT_SYMBOL(read_cache_page); 2447 2448 /** 2449 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 2450 * @mapping: the page's address_space 2451 * @index: the page index 2452 * @gfp: the page allocator flags to use if allocating 2453 * 2454 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 2455 * any new page allocations done using the specified allocation flags. 2456 * 2457 * If the page does not get brought uptodate, return -EIO. 2458 */ 2459 struct page *read_cache_page_gfp(struct address_space *mapping, 2460 pgoff_t index, 2461 gfp_t gfp) 2462 { 2463 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 2464 2465 return do_read_cache_page(mapping, index, filler, NULL, gfp); 2466 } 2467 EXPORT_SYMBOL(read_cache_page_gfp); 2468 2469 /* 2470 * Performs necessary checks before doing a write 2471 * 2472 * Can adjust writing position or amount of bytes to write. 2473 * Returns appropriate error code that caller should return or 2474 * zero in case that write should be allowed. 2475 */ 2476 inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) 2477 { 2478 struct file *file = iocb->ki_filp; 2479 struct inode *inode = file->f_mapping->host; 2480 unsigned long limit = rlimit(RLIMIT_FSIZE); 2481 loff_t pos; 2482 2483 if (!iov_iter_count(from)) 2484 return 0; 2485 2486 /* FIXME: this is for backwards compatibility with 2.4 */ 2487 if (iocb->ki_flags & IOCB_APPEND) 2488 iocb->ki_pos = i_size_read(inode); 2489 2490 pos = iocb->ki_pos; 2491 2492 if (limit != RLIM_INFINITY) { 2493 if (iocb->ki_pos >= limit) { 2494 send_sig(SIGXFSZ, current, 0); 2495 return -EFBIG; 2496 } 2497 iov_iter_truncate(from, limit - (unsigned long)pos); 2498 } 2499 2500 /* 2501 * LFS rule 2502 */ 2503 if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS && 2504 !(file->f_flags & O_LARGEFILE))) { 2505 if (pos >= MAX_NON_LFS) 2506 return -EFBIG; 2507 iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos); 2508 } 2509 2510 /* 2511 * Are we about to exceed the fs block limit ? 2512 * 2513 * If we have written data it becomes a short write. If we have 2514 * exceeded without writing data we send a signal and return EFBIG. 2515 * Linus frestrict idea will clean these up nicely.. 2516 */ 2517 if (unlikely(pos >= inode->i_sb->s_maxbytes)) 2518 return -EFBIG; 2519 2520 iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos); 2521 return iov_iter_count(from); 2522 } 2523 EXPORT_SYMBOL(generic_write_checks); 2524 2525 int pagecache_write_begin(struct file *file, struct address_space *mapping, 2526 loff_t pos, unsigned len, unsigned flags, 2527 struct page **pagep, void **fsdata) 2528 { 2529 const struct address_space_operations *aops = mapping->a_ops; 2530 2531 return aops->write_begin(file, mapping, pos, len, flags, 2532 pagep, fsdata); 2533 } 2534 EXPORT_SYMBOL(pagecache_write_begin); 2535 2536 int pagecache_write_end(struct file *file, struct address_space *mapping, 2537 loff_t pos, unsigned len, unsigned copied, 2538 struct page *page, void *fsdata) 2539 { 2540 const struct address_space_operations *aops = mapping->a_ops; 2541 2542 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 2543 } 2544 EXPORT_SYMBOL(pagecache_write_end); 2545 2546 ssize_t 2547 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) 2548 { 2549 struct file *file = iocb->ki_filp; 2550 struct address_space *mapping = file->f_mapping; 2551 struct inode *inode = mapping->host; 2552 loff_t pos = iocb->ki_pos; 2553 ssize_t written; 2554 size_t write_len; 2555 pgoff_t end; 2556 struct iov_iter data; 2557 2558 write_len = iov_iter_count(from); 2559 end = (pos + write_len - 1) >> PAGE_SHIFT; 2560 2561 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); 2562 if (written) 2563 goto out; 2564 2565 /* 2566 * After a write we want buffered reads to be sure to go to disk to get 2567 * the new data. We invalidate clean cached page from the region we're 2568 * about to write. We do this *before* the write so that we can return 2569 * without clobbering -EIOCBQUEUED from ->direct_IO(). 2570 */ 2571 if (mapping->nrpages) { 2572 written = invalidate_inode_pages2_range(mapping, 2573 pos >> PAGE_SHIFT, end); 2574 /* 2575 * If a page can not be invalidated, return 0 to fall back 2576 * to buffered write. 2577 */ 2578 if (written) { 2579 if (written == -EBUSY) 2580 return 0; 2581 goto out; 2582 } 2583 } 2584 2585 data = *from; 2586 written = mapping->a_ops->direct_IO(iocb, &data); 2587 2588 /* 2589 * Finally, try again to invalidate clean pages which might have been 2590 * cached by non-direct readahead, or faulted in by get_user_pages() 2591 * if the source of the write was an mmap'ed region of the file 2592 * we're writing. Either one is a pretty crazy thing to do, 2593 * so we don't support it 100%. If this invalidation 2594 * fails, tough, the write still worked... 2595 */ 2596 if (mapping->nrpages) { 2597 invalidate_inode_pages2_range(mapping, 2598 pos >> PAGE_SHIFT, end); 2599 } 2600 2601 if (written > 0) { 2602 pos += written; 2603 iov_iter_advance(from, written); 2604 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 2605 i_size_write(inode, pos); 2606 mark_inode_dirty(inode); 2607 } 2608 iocb->ki_pos = pos; 2609 } 2610 out: 2611 return written; 2612 } 2613 EXPORT_SYMBOL(generic_file_direct_write); 2614 2615 /* 2616 * Find or create a page at the given pagecache position. Return the locked 2617 * page. This function is specifically for buffered writes. 2618 */ 2619 struct page *grab_cache_page_write_begin(struct address_space *mapping, 2620 pgoff_t index, unsigned flags) 2621 { 2622 struct page *page; 2623 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT; 2624 2625 if (flags & AOP_FLAG_NOFS) 2626 fgp_flags |= FGP_NOFS; 2627 2628 page = pagecache_get_page(mapping, index, fgp_flags, 2629 mapping_gfp_mask(mapping)); 2630 if (page) 2631 wait_for_stable_page(page); 2632 2633 return page; 2634 } 2635 EXPORT_SYMBOL(grab_cache_page_write_begin); 2636 2637 ssize_t generic_perform_write(struct file *file, 2638 struct iov_iter *i, loff_t pos) 2639 { 2640 struct address_space *mapping = file->f_mapping; 2641 const struct address_space_operations *a_ops = mapping->a_ops; 2642 long status = 0; 2643 ssize_t written = 0; 2644 unsigned int flags = 0; 2645 2646 /* 2647 * Copies from kernel address space cannot fail (NFSD is a big user). 2648 */ 2649 if (!iter_is_iovec(i)) 2650 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2651 2652 do { 2653 struct page *page; 2654 unsigned long offset; /* Offset into pagecache page */ 2655 unsigned long bytes; /* Bytes to write to page */ 2656 size_t copied; /* Bytes copied from user */ 2657 void *fsdata; 2658 2659 offset = (pos & (PAGE_SIZE - 1)); 2660 bytes = min_t(unsigned long, PAGE_SIZE - offset, 2661 iov_iter_count(i)); 2662 2663 again: 2664 /* 2665 * Bring in the user page that we will copy from _first_. 2666 * Otherwise there's a nasty deadlock on copying from the 2667 * same page as we're writing to, without it being marked 2668 * up-to-date. 2669 * 2670 * Not only is this an optimisation, but it is also required 2671 * to check that the address is actually valid, when atomic 2672 * usercopies are used, below. 2673 */ 2674 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2675 status = -EFAULT; 2676 break; 2677 } 2678 2679 if (fatal_signal_pending(current)) { 2680 status = -EINTR; 2681 break; 2682 } 2683 2684 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2685 &page, &fsdata); 2686 if (unlikely(status < 0)) 2687 break; 2688 2689 if (mapping_writably_mapped(mapping)) 2690 flush_dcache_page(page); 2691 2692 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2693 flush_dcache_page(page); 2694 2695 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2696 page, fsdata); 2697 if (unlikely(status < 0)) 2698 break; 2699 copied = status; 2700 2701 cond_resched(); 2702 2703 iov_iter_advance(i, copied); 2704 if (unlikely(copied == 0)) { 2705 /* 2706 * If we were unable to copy any data at all, we must 2707 * fall back to a single segment length write. 2708 * 2709 * If we didn't fallback here, we could livelock 2710 * because not all segments in the iov can be copied at 2711 * once without a pagefault. 2712 */ 2713 bytes = min_t(unsigned long, PAGE_SIZE - offset, 2714 iov_iter_single_seg_count(i)); 2715 goto again; 2716 } 2717 pos += copied; 2718 written += copied; 2719 2720 balance_dirty_pages_ratelimited(mapping); 2721 } while (iov_iter_count(i)); 2722 2723 return written ? written : status; 2724 } 2725 EXPORT_SYMBOL(generic_perform_write); 2726 2727 /** 2728 * __generic_file_write_iter - write data to a file 2729 * @iocb: IO state structure (file, offset, etc.) 2730 * @from: iov_iter with data to write 2731 * 2732 * This function does all the work needed for actually writing data to a 2733 * file. It does all basic checks, removes SUID from the file, updates 2734 * modification times and calls proper subroutines depending on whether we 2735 * do direct IO or a standard buffered write. 2736 * 2737 * It expects i_mutex to be grabbed unless we work on a block device or similar 2738 * object which does not need locking at all. 2739 * 2740 * This function does *not* take care of syncing data in case of O_SYNC write. 2741 * A caller has to handle it. This is mainly due to the fact that we want to 2742 * avoid syncing under i_mutex. 2743 */ 2744 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2745 { 2746 struct file *file = iocb->ki_filp; 2747 struct address_space * mapping = file->f_mapping; 2748 struct inode *inode = mapping->host; 2749 ssize_t written = 0; 2750 ssize_t err; 2751 ssize_t status; 2752 2753 /* We can write back this queue in page reclaim */ 2754 current->backing_dev_info = inode_to_bdi(inode); 2755 err = file_remove_privs(file); 2756 if (err) 2757 goto out; 2758 2759 err = file_update_time(file); 2760 if (err) 2761 goto out; 2762 2763 if (iocb->ki_flags & IOCB_DIRECT) { 2764 loff_t pos, endbyte; 2765 2766 written = generic_file_direct_write(iocb, from); 2767 /* 2768 * If the write stopped short of completing, fall back to 2769 * buffered writes. Some filesystems do this for writes to 2770 * holes, for example. For DAX files, a buffered write will 2771 * not succeed (even if it did, DAX does not handle dirty 2772 * page-cache pages correctly). 2773 */ 2774 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) 2775 goto out; 2776 2777 status = generic_perform_write(file, from, pos = iocb->ki_pos); 2778 /* 2779 * If generic_perform_write() returned a synchronous error 2780 * then we want to return the number of bytes which were 2781 * direct-written, or the error code if that was zero. Note 2782 * that this differs from normal direct-io semantics, which 2783 * will return -EFOO even if some bytes were written. 2784 */ 2785 if (unlikely(status < 0)) { 2786 err = status; 2787 goto out; 2788 } 2789 /* 2790 * We need to ensure that the page cache pages are written to 2791 * disk and invalidated to preserve the expected O_DIRECT 2792 * semantics. 2793 */ 2794 endbyte = pos + status - 1; 2795 err = filemap_write_and_wait_range(mapping, pos, endbyte); 2796 if (err == 0) { 2797 iocb->ki_pos = endbyte + 1; 2798 written += status; 2799 invalidate_mapping_pages(mapping, 2800 pos >> PAGE_SHIFT, 2801 endbyte >> PAGE_SHIFT); 2802 } else { 2803 /* 2804 * We don't know how much we wrote, so just return 2805 * the number of bytes which were direct-written 2806 */ 2807 } 2808 } else { 2809 written = generic_perform_write(file, from, iocb->ki_pos); 2810 if (likely(written > 0)) 2811 iocb->ki_pos += written; 2812 } 2813 out: 2814 current->backing_dev_info = NULL; 2815 return written ? written : err; 2816 } 2817 EXPORT_SYMBOL(__generic_file_write_iter); 2818 2819 /** 2820 * generic_file_write_iter - write data to a file 2821 * @iocb: IO state structure 2822 * @from: iov_iter with data to write 2823 * 2824 * This is a wrapper around __generic_file_write_iter() to be used by most 2825 * filesystems. It takes care of syncing the file in case of O_SYNC file 2826 * and acquires i_mutex as needed. 2827 */ 2828 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2829 { 2830 struct file *file = iocb->ki_filp; 2831 struct inode *inode = file->f_mapping->host; 2832 ssize_t ret; 2833 2834 inode_lock(inode); 2835 ret = generic_write_checks(iocb, from); 2836 if (ret > 0) 2837 ret = __generic_file_write_iter(iocb, from); 2838 inode_unlock(inode); 2839 2840 if (ret > 0) 2841 ret = generic_write_sync(iocb, ret); 2842 return ret; 2843 } 2844 EXPORT_SYMBOL(generic_file_write_iter); 2845 2846 /** 2847 * try_to_release_page() - release old fs-specific metadata on a page 2848 * 2849 * @page: the page which the kernel is trying to free 2850 * @gfp_mask: memory allocation flags (and I/O mode) 2851 * 2852 * The address_space is to try to release any data against the page 2853 * (presumably at page->private). If the release was successful, return `1'. 2854 * Otherwise return zero. 2855 * 2856 * This may also be called if PG_fscache is set on a page, indicating that the 2857 * page is known to the local caching routines. 2858 * 2859 * The @gfp_mask argument specifies whether I/O may be performed to release 2860 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). 2861 * 2862 */ 2863 int try_to_release_page(struct page *page, gfp_t gfp_mask) 2864 { 2865 struct address_space * const mapping = page->mapping; 2866 2867 BUG_ON(!PageLocked(page)); 2868 if (PageWriteback(page)) 2869 return 0; 2870 2871 if (mapping && mapping->a_ops->releasepage) 2872 return mapping->a_ops->releasepage(page, gfp_mask); 2873 return try_to_free_buffers(page); 2874 } 2875 2876 EXPORT_SYMBOL(try_to_release_page); 2877