/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem			(truncate_pagecache)
 *    ->private_lock			(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock			(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem			(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page			(access_process_vm)
 *
 *  ->i_mutex				(generic_perform_write)
 *    ->mmap_sem			(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock				(fs/fs-writeback.c)
 *    ->mapping->tree_lock		(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock			(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock			(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->tree_lock			(try_to_unmap_one)
 *    ->zone_lru_lock(zone)		(follow_page->mark_page_accessed)
 *    ->zone_lru_lock(zone)		(check_pte_range->isolate_lru_page)
 *    ->private_lock			(page_remove_rmap->set_page_dirty)
 *    ->tree_lock			(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock			(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock			(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock		(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock			(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock			(memory_failure, collect_procs_ao)
 */
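
/*
 * Illustrative sketch (not part of the original file): the ordering above
 * means that mapping->tree_lock nests inside the page lock and must be
 * taken with interrupts disabled.  A removal path therefore looks roughly
 * like this (error handling omitted):
 *
 *	lock_page(page);
 *	spin_lock_irq(&mapping->tree_lock);
 *	__delete_from_page_cache(page, NULL);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	unlock_page(page);
 *
 * delete_from_page_cache() below is the real, exported version of this
 * pattern.
 */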

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;

		mapping->nrexceptional--;
		if (!dax_mapping(mapping)) {
			if (shadowp)
				*shadowp = p;
		} else {
			/* DAX can replace empty locked entry with a hole */
			WARN_ON_ONCE(p !=
				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
					 RADIX_DAX_ENTRY_LOCK));
			/* Wakeup waiters for exceptional entry lock */
			dax_wake_mapping_entry_waiter(mapping, page->index,
						      false);
		}
	}
	__radix_tree_replace(&mapping->page_tree, node, slot, page,
			     workingset_update_node, mapping);
	mapping->nrpages++;
	return 0;
}

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	int i, nr;

	/* hugetlb pages are represented by one entry in the radix tree */
	nr = PageHuge(page) ? 1 : hpage_nr_pages(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	for (i = 0; i < nr; i++) {
		struct radix_tree_node *node;
		void **slot;

		__radix_tree_lookup(&mapping->page_tree, page->index + i,
				    &node, &slot);

		if (!node) {
			VM_BUG_ON_PAGE(nr != 1, page);
			/*
			 * We need a node to properly account shadow
			 * entries.  Don't plant any without.  XXX
			 */
			shadow = NULL;
		}

		radix_tree_clear_tags(&mapping->page_tree, node, slot);
		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
				     workingset_update_node, mapping);
	}

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;
	int nr = hpage_nr_pages(page);

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else {
		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
	}

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleared before returning page into buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}
EXPORT_SYMBOL(delete_from_page_cache);

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
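
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * to push a byte range to disk without waiting for completion might do
 * something like:
 *
 *	err = filemap_fdatawrite_range(inode->i_mapping, pos,
 *				       pos + len - 1);
 *
 * Note that the end offset is inclusive, and that this only *starts*
 * writeback; filemap_fdatawait_range() below is needed to wait for it.
 */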

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

static int __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	return ret;
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	int ret, ret2;

	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
void filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return;

	__filemap_fdatawait_range(mapping, 0, i_size - 1);
}
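
/*
 * Illustrative sketch (not part of the original file): the usual
 * "data integrity" sequence over a byte range combines the two halves
 * above, i.e. start writeback and then wait for it:
 *
 *	err = filemap_fdatawrite_range(mapping, start, end);
 *	if (err != -EIO)
 *		err2 = filemap_fdatawait_range(mapping, start, end);
 *
 * filemap_write_and_wait_range() below packages exactly this pattern.
 */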

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Check error status of the address space
 * and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case; it may indicate the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
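
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->fsync() method typically uses filemap_write_and_wait_range() before
 * flushing its own metadata, roughly like this (the metadata helper name
 * is hypothetical):
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		int err;
 *
 *		err = filemap_write_and_wait_range(file->f_mapping, start, end);
 *		if (err)
 *			return err;
 *		return example_sync_metadata(file, datasync);
 *	}
 */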

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		get_page(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL);
		error = page_cache_tree_insert(mapping, new, NULL);
		BUG_ON(error);

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_node_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_node_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		put_page(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_node_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);
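
/*
 * Illustrative sketch (not part of the original file): the common way to
 * populate a missing index is to allocate a page and insert it through
 * add_to_page_cache_lru(), falling back gracefully if somebody else got
 * there first:
 *
 *	page = __page_cache_alloc(gfp);
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, gfp);
 *	if (err == -EEXIST)
 *		err = 0;	(another task added the page; retry the lookup)
 *	if (err)
 *		put_page(page);
 *
 * page_cache_read() later in this file follows the same pattern.
 */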

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		if (!(gfp_mask & __GFP_WRITE) &&
		    shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	return bit_waitqueue(page, 0);
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				      int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
	if (!is_write) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else {
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	struct page *page_head = compound_head(page);
	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
			   TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	struct page *page_head = compound_head(page);
	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
				  bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);
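
/*
 * Illustrative sketch (not part of the original file): __lock_page() and
 * friends are normally reached through the lock_page() wrappers in
 * <linux/pagemap.h>.  A typical critical section over a cached page is:
 *
 *	lock_page(page);
 *	if (page->mapping == mapping)	(page was not truncated meanwhile)
 *		... operate on the page ...
 *	unlock_page(page);
 *
 * The truncation re-check after taking the lock is the pattern used by
 * find_lock_entry() and filemap_fault() below.
 */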

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *head, *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			put_page(head);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page_mapping(page) != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: If page is not present then a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU
 *   list. The page is returned locked and with an increased
 *   refcount. Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				put_page(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
					    gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			put_page(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
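
/*
 * Illustrative sketch (not part of the original file): a path that needs a
 * locked, possibly newly allocated page at @index would typically ask for
 * it like this:
 *
 *	page = pagecache_get_page(mapping, index,
 *				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *				  mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	... use the locked page ...
 *	unlock_page(page);
 *	put_page(page);
 *
 * This is essentially what the find_or_create_page() wrapper in
 * <linux/pagemap.h> does.
 */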

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}
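
/*
 * Illustrative sketch (not part of the original file): find_get_pages()
 * style lookups are normally consumed in batches, and every returned page
 * carries a reference that the caller must drop:
 *
 *	struct page *pages[PAGEVEC_SIZE];
 *	unsigned i, nr;
 *
 *	nr = find_get_pages(mapping, index, PAGEVEC_SIZE, pages);
 *	for (i = 0; i < nr; i++) {
 *		... examine pages[i] ...
 *		put_page(pages[i]);
 *	}
 *
 * Most in-tree callers use the pagevec_lookup() wrappers, which release
 * the references via pagevec_release().
 */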

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* Hit a hole, there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
			put_page(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);
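
/*
 * Illustrative sketch (not part of the original file): because
 * find_get_pages_contig() stops at the first hole, a caller that needs
 * @want index-contiguous cached pages checks the returned count:
 *
 *	nr = find_get_pages_contig(mapping, index, want, pages);
 *	if (nr < want) {
 *		... release the nr pages found and fall back ...
 *	}
 *
 * As with find_get_pages(), each returned page must be released with
 * put_page() when the caller is done with it.
 */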

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * find_get_entries_tag - find and return entries that match @tag
 * @mapping:	the address_space to search
 * @start:	the starting page cache index
 * @tag:	the tag index
 * @nr_entries:	the maximum number of entries
 * @entries:	where the resulting entries are placed
 * @indices:	the cache indices corresponding to the entries in @entries
 *
 * Like find_get_entries, except we only return entries which are tagged with
 * @tag.
 */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			      int tag, unsigned int nr_entries,
			      struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, start, tag) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_entries_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
				      struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly.  But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
				    struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
		return -EINVAL;
	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);

	index = *ppos >> PAGE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			/*
			 * See comment in do_read_cache_page on why
			 * wait_on_page_locked is used to avoid unnecessary
			 * serialisations and why it's safe.
			 */
			error = wait_on_page_locked_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (PageUptodate(page))
				goto page_ok;

			if (inode->i_blkbits == PAGE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			/* pipes can't handle partially uptodate pages */
			if (unlikely(iter->type & ITER_PIPE))
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			put_page(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_MASK) + 1;
			if (nr <= offset) {
				put_page(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;
		prev_offset = offset;

		put_page(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				put_page(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					put_page(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		put_page(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (error) {
			put_page(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	size_t count = iov_iter_count(iter);

	if (!count)
		goto out; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		struct iov_iter data = *iter;
		loff_t size;

		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
					iocb->ki_pos + count - 1);
		if (retval < 0)
			goto out;

		file_accessed(file);

		retval = mapping->a_ops->direct_IO(iocb, &data);
		if (retval >= 0) {
			iocb->ki_pos += retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
		    IS_DAX(inode))
			goto out;
	}

	retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
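
/*
 * Illustrative sketch (not part of the original file): filesystems that
 * keep their file data in the page cache usually wire this routine (and
 * filemap_fault() below, via generic_file_mmap()) straight into their
 * method tables, e.g.:
 *
 *	const struct file_operations example_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 *
 * The struct name here is hypothetical; ext2's file_operations is a real
 * in-tree example of this arrangement.
 */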
1995 */ 1996 if (ra->mmap_miss > MMAP_LOTSAMISS) 1997 return; 1998 1999 /* 2000 * mmap read-around 2001 */ 2002 ra->start = max_t(long, 0, offset - ra->ra_pages / 2); 2003 ra->size = ra->ra_pages; 2004 ra->async_size = ra->ra_pages / 4; 2005 ra_submit(ra, mapping, file); 2006 } 2007 2008 /* 2009 * Asynchronous readahead happens when we find the page and PG_readahead, 2010 * so we want to possibly extend the readahead further.. 2011 */ 2012 static void do_async_mmap_readahead(struct vm_area_struct *vma, 2013 struct file_ra_state *ra, 2014 struct file *file, 2015 struct page *page, 2016 pgoff_t offset) 2017 { 2018 struct address_space *mapping = file->f_mapping; 2019 2020 /* If we don't want any read-ahead, don't bother */ 2021 if (vma->vm_flags & VM_RAND_READ) 2022 return; 2023 if (ra->mmap_miss > 0) 2024 ra->mmap_miss--; 2025 if (PageReadahead(page)) 2026 page_cache_async_readahead(mapping, ra, file, 2027 page, offset, ra->ra_pages); 2028 } 2029 2030 /** 2031 * filemap_fault - read in file data for page fault handling 2032 * @vma: vma in which the fault was taken 2033 * @vmf: struct vm_fault containing details of the fault 2034 * 2035 * filemap_fault() is invoked via the vma operations vector for a 2036 * mapped memory region to read in file data during a page fault. 2037 * 2038 * The goto's are kind of ugly, but this streamlines the normal case of having 2039 * it in the page cache, and handles the special cases reasonably without 2040 * having a lot of duplicated code. 2041 * 2042 * vma->vm_mm->mmap_sem must be held on entry. 2043 * 2044 * If our return value has VM_FAULT_RETRY set, it's because 2045 * lock_page_or_retry() returned 0. 2046 * The mmap_sem has usually been released in this case. 2047 * See __lock_page_or_retry() for the exception. 2048 * 2049 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem 2050 * has not been released. 2051 * 2052 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 2053 */ 2054 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2055 { 2056 int error; 2057 struct file *file = vma->vm_file; 2058 struct address_space *mapping = file->f_mapping; 2059 struct file_ra_state *ra = &file->f_ra; 2060 struct inode *inode = mapping->host; 2061 pgoff_t offset = vmf->pgoff; 2062 struct page *page; 2063 loff_t size; 2064 int ret = 0; 2065 2066 size = round_up(i_size_read(inode), PAGE_SIZE); 2067 if (offset >= size >> PAGE_SHIFT) 2068 return VM_FAULT_SIGBUS; 2069 2070 /* 2071 * Do we have something in the page cache already? 2072 */ 2073 page = find_get_page(mapping, offset); 2074 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { 2075 /* 2076 * We found the page, so try async readahead before 2077 * waiting for the lock. 2078 */ 2079 do_async_mmap_readahead(vma, ra, file, page, offset); 2080 } else if (!page) { 2081 /* No page in the page cache at all */ 2082 do_sync_mmap_readahead(vma, ra, file, offset); 2083 count_vm_event(PGMAJFAULT); 2084 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); 2085 ret = VM_FAULT_MAJOR; 2086 retry_find: 2087 page = find_get_page(mapping, offset); 2088 if (!page) 2089 goto no_cached_page; 2090 } 2091 2092 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { 2093 put_page(page); 2094 return ret | VM_FAULT_RETRY; 2095 } 2096 2097 /* Did it get truncated? 
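 * Truncation or reclaim can remove the page from this mapping between the
 * pagecache lookup above and the moment the page lock is taken; if that
 * happened, drop the page and retry the lookup.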
*/ 2098 if (unlikely(page->mapping != mapping)) { 2099 unlock_page(page); 2100 put_page(page); 2101 goto retry_find; 2102 } 2103 VM_BUG_ON_PAGE(page->index != offset, page); 2104 2105 /* 2106 * We have a locked page in the page cache, now we need to check 2107 * that it's up-to-date. If not, it is going to be due to an error. 2108 */ 2109 if (unlikely(!PageUptodate(page))) 2110 goto page_not_uptodate; 2111 2112 /* 2113 * Found the page and have a reference on it. 2114 * We must recheck i_size under page lock. 2115 */ 2116 size = round_up(i_size_read(inode), PAGE_SIZE); 2117 if (unlikely(offset >= size >> PAGE_SHIFT)) { 2118 unlock_page(page); 2119 put_page(page); 2120 return VM_FAULT_SIGBUS; 2121 } 2122 2123 vmf->page = page; 2124 return ret | VM_FAULT_LOCKED; 2125 2126 no_cached_page: 2127 /* 2128 * We're only likely to ever get here if MADV_RANDOM is in 2129 * effect. 2130 */ 2131 error = page_cache_read(file, offset, vmf->gfp_mask); 2132 2133 /* 2134 * The page we want has now been added to the page cache. 2135 * In the unlikely event that someone removed it in the 2136 * meantime, we'll just come back here and read it again. 2137 */ 2138 if (error >= 0) 2139 goto retry_find; 2140 2141 /* 2142 * An error return from page_cache_read can result if the 2143 * system is low on memory, or a problem occurs while trying 2144 * to schedule I/O. 2145 */ 2146 if (error == -ENOMEM) 2147 return VM_FAULT_OOM; 2148 return VM_FAULT_SIGBUS; 2149 2150 page_not_uptodate: 2151 /* 2152 * Umm, take care of errors if the page isn't up-to-date. 2153 * Try to re-read it _once_. We do this synchronously, 2154 * because there really aren't any performance issues here 2155 * and we need to check for errors. 2156 */ 2157 ClearPageError(page); 2158 error = mapping->a_ops->readpage(file, page); 2159 if (!error) { 2160 wait_on_page_locked(page); 2161 if (!PageUptodate(page)) 2162 error = -EIO; 2163 } 2164 put_page(page); 2165 2166 if (!error || error == AOP_TRUNCATED_PAGE) 2167 goto retry_find; 2168 2169 /* Things didn't work out. Return zero to tell the mm layer so. */ 2170 shrink_readahead_size_eio(file, ra); 2171 return VM_FAULT_SIGBUS; 2172 } 2173 EXPORT_SYMBOL(filemap_fault); 2174 2175 void filemap_map_pages(struct fault_env *fe, 2176 pgoff_t start_pgoff, pgoff_t end_pgoff) 2177 { 2178 struct radix_tree_iter iter; 2179 void **slot; 2180 struct file *file = fe->vma->vm_file; 2181 struct address_space *mapping = file->f_mapping; 2182 pgoff_t last_pgoff = start_pgoff; 2183 loff_t size; 2184 struct page *head, *page; 2185 2186 rcu_read_lock(); 2187 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, 2188 start_pgoff) { 2189 if (iter.index > end_pgoff) 2190 break; 2191 repeat: 2192 page = radix_tree_deref_slot(slot); 2193 if (unlikely(!page)) 2194 goto next; 2195 if (radix_tree_exception(page)) { 2196 if (radix_tree_deref_retry(page)) { 2197 slot = radix_tree_iter_retry(&iter); 2198 continue; 2199 } 2200 goto next; 2201 } 2202 2203 head = compound_head(page); 2204 if (!page_cache_get_speculative(head)) 2205 goto repeat; 2206 2207 /* The page was split under us? */ 2208 if (compound_head(page) != head) { 2209 put_page(head); 2210 goto repeat; 2211 } 2212 2213 /* Has the page moved? 
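 * After taking a speculative reference on the head page, the slot must be
 * rechecked under RCU: if it no longer points at this page, the page was
 * moved or reused, so drop the reference and try the slot again.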
*/ 2214 if (unlikely(page != *slot)) { 2215 put_page(head); 2216 goto repeat; 2217 } 2218 2219 if (!PageUptodate(page) || 2220 PageReadahead(page) || 2221 PageHWPoison(page)) 2222 goto skip; 2223 if (!trylock_page(page)) 2224 goto skip; 2225 2226 if (page->mapping != mapping || !PageUptodate(page)) 2227 goto unlock; 2228 2229 size = round_up(i_size_read(mapping->host), PAGE_SIZE); 2230 if (page->index >= size >> PAGE_SHIFT) 2231 goto unlock; 2232 2233 if (file->f_ra.mmap_miss > 0) 2234 file->f_ra.mmap_miss--; 2235 2236 fe->address += (iter.index - last_pgoff) << PAGE_SHIFT; 2237 if (fe->pte) 2238 fe->pte += iter.index - last_pgoff; 2239 last_pgoff = iter.index; 2240 if (alloc_set_pte(fe, NULL, page)) 2241 goto unlock; 2242 unlock_page(page); 2243 goto next; 2244 unlock: 2245 unlock_page(page); 2246 skip: 2247 put_page(page); 2248 next: 2249 /* Huge page is mapped? No need to proceed. */ 2250 if (pmd_trans_huge(*fe->pmd)) 2251 break; 2252 if (iter.index == end_pgoff) 2253 break; 2254 } 2255 rcu_read_unlock(); 2256 } 2257 EXPORT_SYMBOL(filemap_map_pages); 2258 2259 int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 2260 { 2261 struct page *page = vmf->page; 2262 struct inode *inode = file_inode(vma->vm_file); 2263 int ret = VM_FAULT_LOCKED; 2264 2265 sb_start_pagefault(inode->i_sb); 2266 file_update_time(vma->vm_file); 2267 lock_page(page); 2268 if (page->mapping != inode->i_mapping) { 2269 unlock_page(page); 2270 ret = VM_FAULT_NOPAGE; 2271 goto out; 2272 } 2273 /* 2274 * We mark the page dirty already here so that when freeze is in 2275 * progress, we are guaranteed that writeback during freezing will 2276 * see the dirty page and writeprotect it again. 2277 */ 2278 set_page_dirty(page); 2279 wait_for_stable_page(page); 2280 out: 2281 sb_end_pagefault(inode->i_sb); 2282 return ret; 2283 } 2284 EXPORT_SYMBOL(filemap_page_mkwrite); 2285 2286 const struct vm_operations_struct generic_file_vm_ops = { 2287 .fault = filemap_fault, 2288 .map_pages = filemap_map_pages, 2289 .page_mkwrite = filemap_page_mkwrite, 2290 }; 2291 2292 /* This is used for a general mmap of a disk file */ 2293 2294 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2295 { 2296 struct address_space *mapping = file->f_mapping; 2297 2298 if (!mapping->a_ops->readpage) 2299 return -ENOEXEC; 2300 file_accessed(file); 2301 vma->vm_ops = &generic_file_vm_ops; 2302 return 0; 2303 } 2304 2305 /* 2306 * This is for filesystems which do not implement ->writepage. 
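 * Shared writable mappings would need ->writepage to write dirty pages back,
 * so generic_file_readonly_mmap() below rejects them with -EINVAL. As an
 * illustrative, made-up example, such a filesystem might wire up:
 *
 *	const struct file_operations example_ro_fops = {
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_readonly_mmap,
 *	};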
2307 */ 2308 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 2309 { 2310 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 2311 return -EINVAL; 2312 return generic_file_mmap(file, vma); 2313 } 2314 #else 2315 int generic_file_mmap(struct file * file, struct vm_area_struct * vma) 2316 { 2317 return -ENOSYS; 2318 } 2319 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) 2320 { 2321 return -ENOSYS; 2322 } 2323 #endif /* CONFIG_MMU */ 2324 2325 EXPORT_SYMBOL(generic_file_mmap); 2326 EXPORT_SYMBOL(generic_file_readonly_mmap); 2327 2328 static struct page *wait_on_page_read(struct page *page) 2329 { 2330 if (!IS_ERR(page)) { 2331 wait_on_page_locked(page); 2332 if (!PageUptodate(page)) { 2333 put_page(page); 2334 page = ERR_PTR(-EIO); 2335 } 2336 } 2337 return page; 2338 } 2339 2340 static struct page *do_read_cache_page(struct address_space *mapping, 2341 pgoff_t index, 2342 int (*filler)(void *, struct page *), 2343 void *data, 2344 gfp_t gfp) 2345 { 2346 struct page *page; 2347 int err; 2348 repeat: 2349 page = find_get_page(mapping, index); 2350 if (!page) { 2351 page = __page_cache_alloc(gfp | __GFP_COLD); 2352 if (!page) 2353 return ERR_PTR(-ENOMEM); 2354 err = add_to_page_cache_lru(page, mapping, index, gfp); 2355 if (unlikely(err)) { 2356 put_page(page); 2357 if (err == -EEXIST) 2358 goto repeat; 2359 /* Presumably ENOMEM for radix tree node */ 2360 return ERR_PTR(err); 2361 } 2362 2363 filler: 2364 err = filler(data, page); 2365 if (err < 0) { 2366 put_page(page); 2367 return ERR_PTR(err); 2368 } 2369 2370 page = wait_on_page_read(page); 2371 if (IS_ERR(page)) 2372 return page; 2373 goto out; 2374 } 2375 if (PageUptodate(page)) 2376 goto out; 2377 2378 /* 2379 * Page is not up to date and may be locked due to one of the following cases: 2380 * case a: Page is being filled and the page lock is held 2381 * case b: Read/write error clearing the page uptodate status 2382 * case c: Truncation in progress (page locked) 2383 * case d: Reclaim in progress 2384 * 2385 * Case a, the page will be up to date when the page is unlocked. 2386 * There is no need to serialise on the page lock here as the page 2387 * is pinned so the lock gives no additional protection. Even if the 2388 * page is truncated, the data is still valid if PageUptodate as 2389 * it's a read vs truncate race. 2390 * Case b, the page will not be up to date. 2391 * Case c, the page may be truncated but in itself, the data may still 2392 * be valid after IO completes as it's a read vs truncate race. The 2393 * operation must restart if the page is not uptodate on unlock but 2394 * otherwise serialising on page lock to stabilise the mapping gives 2395 * no additional guarantees to the caller as the page lock is 2396 * released before return. 2397 * Case d, similar to truncation. If reclaim holds the page lock, it 2398 * will be a race with remove_mapping that determines if the mapping 2399 * is valid on unlock but otherwise the data is valid and there is 2400 * no need to serialise with page lock. 2401 * 2402 * As the page lock gives no additional guarantee, we optimistically 2403 * wait on the page to be unlocked and check if it's up to date and 2404 * use the page if it is. Otherwise, the page lock is required to 2405 * distinguish between the different cases. The motivation is that we 2406 * avoid spurious serialisations and wakeups when multiple processes 2407 * wait on the same page for IO to complete.
2408 */ 2409 wait_on_page_locked(page); 2410 if (PageUptodate(page)) 2411 goto out; 2412 2413 /* Distinguish between all the cases under the safety of the lock */ 2414 lock_page(page); 2415 2416 /* Case c or d, restart the operation */ 2417 if (!page->mapping) { 2418 unlock_page(page); 2419 put_page(page); 2420 goto repeat; 2421 } 2422 2423 /* Someone else locked and filled the page in a very small window */ 2424 if (PageUptodate(page)) { 2425 unlock_page(page); 2426 goto out; 2427 } 2428 goto filler; 2429 2430 out: 2431 mark_page_accessed(page); 2432 return page; 2433 } 2434 2435 /** 2436 * read_cache_page - read into page cache, fill it if needed 2437 * @mapping: the page's address_space 2438 * @index: the page index 2439 * @filler: function to perform the read 2440 * @data: first arg to filler(data, page) function, often left as NULL 2441 * 2442 * Read into the page cache. If a page already exists, and PageUptodate() is 2443 * not set, try to fill the page and wait for it to become unlocked. 2444 * 2445 * If the page does not get brought uptodate, return -EIO. 2446 */ 2447 struct page *read_cache_page(struct address_space *mapping, 2448 pgoff_t index, 2449 int (*filler)(void *, struct page *), 2450 void *data) 2451 { 2452 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); 2453 } 2454 EXPORT_SYMBOL(read_cache_page); 2455 2456 /** 2457 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 2458 * @mapping: the page's address_space 2459 * @index: the page index 2460 * @gfp: the page allocator flags to use if allocating 2461 * 2462 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 2463 * any new page allocations done using the specified allocation flags. 2464 * 2465 * If the page does not get brought uptodate, return -EIO. 2466 */ 2467 struct page *read_cache_page_gfp(struct address_space *mapping, 2468 pgoff_t index, 2469 gfp_t gfp) 2470 { 2471 filler_t *filler = (filler_t *)mapping->a_ops->readpage; 2472 2473 return do_read_cache_page(mapping, index, filler, NULL, gfp); 2474 } 2475 EXPORT_SYMBOL(read_cache_page_gfp); 2476 2477 /* 2478 * Performs the necessary checks before doing a write. 2479 * 2480 * May adjust the writing position or the number of bytes to write. 2481 * Returns a negative error code that the caller should return, or the 2482 * number of bytes that may be written (zero means there is nothing to do). 2483 */ 2484 inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) 2485 { 2486 struct file *file = iocb->ki_filp; 2487 struct inode *inode = file->f_mapping->host; 2488 unsigned long limit = rlimit(RLIMIT_FSIZE); 2489 loff_t pos; 2490 2491 if (!iov_iter_count(from)) 2492 return 0; 2493 2494 /* FIXME: this is for backwards compatibility with 2.4 */ 2495 if (iocb->ki_flags & IOCB_APPEND) 2496 iocb->ki_pos = i_size_read(inode); 2497 2498 pos = iocb->ki_pos; 2499 2500 if (limit != RLIM_INFINITY) { 2501 if (iocb->ki_pos >= limit) { 2502 send_sig(SIGXFSZ, current, 0); 2503 return -EFBIG; 2504 } 2505 iov_iter_truncate(from, limit - (unsigned long)pos); 2506 } 2507 2508 /* 2509 * LFS rule 2510 */ 2511 if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS && 2512 !(file->f_flags & O_LARGEFILE))) { 2513 if (pos >= MAX_NON_LFS) 2514 return -EFBIG; 2515 iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos); 2516 } 2517 2518 /* 2519 * Are we about to exceed the fs block limit? 2520 * 2521 * If we have written data it becomes a short write. If we have 2522 * exceeded without writing data we return -EFBIG.
2523 * Linus's frestrict idea will clean these up nicely. 2524 */ 2525 if (unlikely(pos >= inode->i_sb->s_maxbytes)) 2526 return -EFBIG; 2527 2528 iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos); 2529 return iov_iter_count(from); 2530 } 2531 EXPORT_SYMBOL(generic_write_checks); 2532 2533 int pagecache_write_begin(struct file *file, struct address_space *mapping, 2534 loff_t pos, unsigned len, unsigned flags, 2535 struct page **pagep, void **fsdata) 2536 { 2537 const struct address_space_operations *aops = mapping->a_ops; 2538 2539 return aops->write_begin(file, mapping, pos, len, flags, 2540 pagep, fsdata); 2541 } 2542 EXPORT_SYMBOL(pagecache_write_begin); 2543 2544 int pagecache_write_end(struct file *file, struct address_space *mapping, 2545 loff_t pos, unsigned len, unsigned copied, 2546 struct page *page, void *fsdata) 2547 { 2548 const struct address_space_operations *aops = mapping->a_ops; 2549 2550 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); 2551 } 2552 EXPORT_SYMBOL(pagecache_write_end); 2553 2554 ssize_t 2555 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) 2556 { 2557 struct file *file = iocb->ki_filp; 2558 struct address_space *mapping = file->f_mapping; 2559 struct inode *inode = mapping->host; 2560 loff_t pos = iocb->ki_pos; 2561 ssize_t written; 2562 size_t write_len; 2563 pgoff_t end; 2564 struct iov_iter data; 2565 2566 write_len = iov_iter_count(from); 2567 end = (pos + write_len - 1) >> PAGE_SHIFT; 2568 2569 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); 2570 if (written) 2571 goto out; 2572 2573 /* 2574 * After a write we want buffered reads to be sure to go to disk to get 2575 * the new data. We invalidate clean cached pages from the region we're 2576 * about to write. We do this *before* the write so that we can return 2577 * without clobbering -EIOCBQUEUED from ->direct_IO(). 2578 */ 2579 if (mapping->nrpages) { 2580 written = invalidate_inode_pages2_range(mapping, 2581 pos >> PAGE_SHIFT, end); 2582 /* 2583 * If a page cannot be invalidated, return 0 to fall back 2584 * to buffered write. 2585 */ 2586 if (written) { 2587 if (written == -EBUSY) 2588 return 0; 2589 goto out; 2590 } 2591 } 2592 2593 data = *from; 2594 written = mapping->a_ops->direct_IO(iocb, &data); 2595 2596 /* 2597 * Finally, try again to invalidate clean pages which might have been 2598 * cached by non-direct readahead, or faulted in by get_user_pages() 2599 * if the source of the write was an mmap'ed region of the file 2600 * we're writing. Either one is a pretty crazy thing to do, 2601 * so we don't support it 100%. If this invalidation 2602 * fails, tough, the write still worked... 2603 */ 2604 if (mapping->nrpages) { 2605 invalidate_inode_pages2_range(mapping, 2606 pos >> PAGE_SHIFT, end); 2607 } 2608 2609 if (written > 0) { 2610 pos += written; 2611 iov_iter_advance(from, written); 2612 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 2613 i_size_write(inode, pos); 2614 mark_inode_dirty(inode); 2615 } 2616 iocb->ki_pos = pos; 2617 } 2618 out: 2619 return written; 2620 } 2621 EXPORT_SYMBOL(generic_file_direct_write); 2622 2623 /* 2624 * Find or create a page at the given pagecache position. Return the locked 2625 * page. This function is specifically for buffered writes.
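 *
 * A hypothetical ->write_begin implementation (the names below are invented
 * for illustration) might use it like this:
 *
 *	static int example_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		struct page *page;
 *
 *		page = grab_cache_page_write_begin(mapping,
 *						   pos >> PAGE_SHIFT, flags);
 *		if (!page)
 *			return -ENOMEM;
 *		*pagep = page;
 *		return 0;
 *	}
 *
 * generic_perform_write() below brackets each per-page copy from the
 * iov_iter with ->write_begin()/->write_end() calls like this one.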
2626 */ 2627 struct page *grab_cache_page_write_begin(struct address_space *mapping, 2628 pgoff_t index, unsigned flags) 2629 { 2630 struct page *page; 2631 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT; 2632 2633 if (flags & AOP_FLAG_NOFS) 2634 fgp_flags |= FGP_NOFS; 2635 2636 page = pagecache_get_page(mapping, index, fgp_flags, 2637 mapping_gfp_mask(mapping)); 2638 if (page) 2639 wait_for_stable_page(page); 2640 2641 return page; 2642 } 2643 EXPORT_SYMBOL(grab_cache_page_write_begin); 2644 2645 ssize_t generic_perform_write(struct file *file, 2646 struct iov_iter *i, loff_t pos) 2647 { 2648 struct address_space *mapping = file->f_mapping; 2649 const struct address_space_operations *a_ops = mapping->a_ops; 2650 long status = 0; 2651 ssize_t written = 0; 2652 unsigned int flags = 0; 2653 2654 /* 2655 * Copies from kernel address space cannot fail (NFSD is a big user). 2656 */ 2657 if (!iter_is_iovec(i)) 2658 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2659 2660 do { 2661 struct page *page; 2662 unsigned long offset; /* Offset into pagecache page */ 2663 unsigned long bytes; /* Bytes to write to page */ 2664 size_t copied; /* Bytes copied from user */ 2665 void *fsdata; 2666 2667 offset = (pos & (PAGE_SIZE - 1)); 2668 bytes = min_t(unsigned long, PAGE_SIZE - offset, 2669 iov_iter_count(i)); 2670 2671 again: 2672 /* 2673 * Bring in the user page that we will copy from _first_. 2674 * Otherwise there's a nasty deadlock on copying from the 2675 * same page as we're writing to, without it being marked 2676 * up-to-date. 2677 * 2678 * Not only is this an optimisation, but it is also required 2679 * to check that the address is actually valid, when atomic 2680 * usercopies are used, below. 2681 */ 2682 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2683 status = -EFAULT; 2684 break; 2685 } 2686 2687 if (fatal_signal_pending(current)) { 2688 status = -EINTR; 2689 break; 2690 } 2691 2692 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2693 &page, &fsdata); 2694 if (unlikely(status < 0)) 2695 break; 2696 2697 if (mapping_writably_mapped(mapping)) 2698 flush_dcache_page(page); 2699 2700 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2701 flush_dcache_page(page); 2702 2703 status = a_ops->write_end(file, mapping, pos, bytes, copied, 2704 page, fsdata); 2705 if (unlikely(status < 0)) 2706 break; 2707 copied = status; 2708 2709 cond_resched(); 2710 2711 iov_iter_advance(i, copied); 2712 if (unlikely(copied == 0)) { 2713 /* 2714 * If we were unable to copy any data at all, we must 2715 * fall back to a single segment length write. 2716 * 2717 * If we didn't fallback here, we could livelock 2718 * because not all segments in the iov can be copied at 2719 * once without a pagefault. 2720 */ 2721 bytes = min_t(unsigned long, PAGE_SIZE - offset, 2722 iov_iter_single_seg_count(i)); 2723 goto again; 2724 } 2725 pos += copied; 2726 written += copied; 2727 2728 balance_dirty_pages_ratelimited(mapping); 2729 } while (iov_iter_count(i)); 2730 2731 return written ? written : status; 2732 } 2733 EXPORT_SYMBOL(generic_perform_write); 2734 2735 /** 2736 * __generic_file_write_iter - write data to a file 2737 * @iocb: IO state structure (file, offset, etc.) 2738 * @from: iov_iter with data to write 2739 * 2740 * This function does all the work needed for actually writing data to a 2741 * file. It does all basic checks, removes SUID from the file, updates 2742 * modification times and calls proper subroutines depending on whether we 2743 * do direct IO or a standard buffered write. 
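 *
 * When an IOCB_DIRECT write stops short (writes into holes on some
 * filesystems, for example), the remaining bytes are written through the
 * page cache and the affected range is then written back and invalidated
 * again so that O_DIRECT semantics are preserved.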
2744 * 2745 * It expects i_mutex to be grabbed unless we work on a block device or similar 2746 * object which does not need locking at all. 2747 * 2748 * This function does *not* take care of syncing data in case of O_SYNC write. 2749 * A caller has to handle it. This is mainly due to the fact that we want to 2750 * avoid syncing under i_mutex. 2751 */ 2752 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2753 { 2754 struct file *file = iocb->ki_filp; 2755 struct address_space * mapping = file->f_mapping; 2756 struct inode *inode = mapping->host; 2757 ssize_t written = 0; 2758 ssize_t err; 2759 ssize_t status; 2760 2761 /* We can write back this queue in page reclaim */ 2762 current->backing_dev_info = inode_to_bdi(inode); 2763 err = file_remove_privs(file); 2764 if (err) 2765 goto out; 2766 2767 err = file_update_time(file); 2768 if (err) 2769 goto out; 2770 2771 if (iocb->ki_flags & IOCB_DIRECT) { 2772 loff_t pos, endbyte; 2773 2774 written = generic_file_direct_write(iocb, from); 2775 /* 2776 * If the write stopped short of completing, fall back to 2777 * buffered writes. Some filesystems do this for writes to 2778 * holes, for example. For DAX files, a buffered write will 2779 * not succeed (even if it did, DAX does not handle dirty 2780 * page-cache pages correctly). 2781 */ 2782 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) 2783 goto out; 2784 2785 status = generic_perform_write(file, from, pos = iocb->ki_pos); 2786 /* 2787 * If generic_perform_write() returned a synchronous error 2788 * then we want to return the number of bytes which were 2789 * direct-written, or the error code if that was zero. Note 2790 * that this differs from normal direct-io semantics, which 2791 * will return -EFOO even if some bytes were written. 2792 */ 2793 if (unlikely(status < 0)) { 2794 err = status; 2795 goto out; 2796 } 2797 /* 2798 * We need to ensure that the page cache pages are written to 2799 * disk and invalidated to preserve the expected O_DIRECT 2800 * semantics. 2801 */ 2802 endbyte = pos + status - 1; 2803 err = filemap_write_and_wait_range(mapping, pos, endbyte); 2804 if (err == 0) { 2805 iocb->ki_pos = endbyte + 1; 2806 written += status; 2807 invalidate_mapping_pages(mapping, 2808 pos >> PAGE_SHIFT, 2809 endbyte >> PAGE_SHIFT); 2810 } else { 2811 /* 2812 * We don't know how much we wrote, so just return 2813 * the number of bytes which were direct-written 2814 */ 2815 } 2816 } else { 2817 written = generic_perform_write(file, from, iocb->ki_pos); 2818 if (likely(written > 0)) 2819 iocb->ki_pos += written; 2820 } 2821 out: 2822 current->backing_dev_info = NULL; 2823 return written ? written : err; 2824 } 2825 EXPORT_SYMBOL(__generic_file_write_iter); 2826 2827 /** 2828 * generic_file_write_iter - write data to a file 2829 * @iocb: IO state structure 2830 * @from: iov_iter with data to write 2831 * 2832 * This is a wrapper around __generic_file_write_iter() to be used by most 2833 * filesystems. It takes care of syncing the file in case of O_SYNC file 2834 * and acquires i_mutex as needed. 
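 *
 * Filesystems with no special locking needs can point ->write_iter in their
 * file_operations directly at this function. Those that take their own
 * locks instead call generic_write_checks(), __generic_file_write_iter() and
 * generic_write_sync() themselves; this wrapper is just that sequence with
 * i_mutex held across the checks and the write.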
2835 */ 2836 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 2837 { 2838 struct file *file = iocb->ki_filp; 2839 struct inode *inode = file->f_mapping->host; 2840 ssize_t ret; 2841 2842 inode_lock(inode); 2843 ret = generic_write_checks(iocb, from); 2844 if (ret > 0) 2845 ret = __generic_file_write_iter(iocb, from); 2846 inode_unlock(inode); 2847 2848 if (ret > 0) 2849 ret = generic_write_sync(iocb, ret); 2850 return ret; 2851 } 2852 EXPORT_SYMBOL(generic_file_write_iter); 2853 2854 /** 2855 * try_to_release_page() - release old fs-specific metadata on a page 2856 * 2857 * @page: the page which the kernel is trying to free 2858 * @gfp_mask: memory allocation flags (and I/O mode) 2859 * 2860 * The address_space is asked to try to release any data held against the page 2861 * (presumably at page->private). If the release was successful, return 1; 2862 * otherwise return zero. 2863 * 2864 * This may also be called if PG_fscache is set on a page, indicating that the 2865 * page is known to the local caching routines. 2866 * 2867 * The @gfp_mask argument specifies whether I/O may be performed to release 2868 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). 2869 * 2870 */ 2871 int try_to_release_page(struct page *page, gfp_t gfp_mask) 2872 { 2873 struct address_space * const mapping = page->mapping; 2874 2875 BUG_ON(!PageLocked(page)); 2876 if (PageWriteback(page)) 2877 return 0; 2878 2879 if (mapping && mapping->a_ops->releasepage) 2880 return mapping->a_ops->releasepage(page, gfp_mask); 2881 return try_to_free_buffers(page); 2882 } 2883 2884 EXPORT_SYMBOL(try_to_release_page); 2885