/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone_lru_lock(zone)	(follow_page->mark_page_accessed)
 *    ->zone_lru_lock(zone)	(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */
static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;

		mapping->nrexceptional--;
		if (shadowp)
			*shadowp = p;
	}
	__radix_tree_replace(&mapping->page_tree, node, slot, page,
			     workingset_update_node, mapping);
	mapping->nrpages++;
	return 0;
}

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	int i, nr;

	/* hugetlb pages are represented by one entry in the radix tree */
	nr = PageHuge(page) ? 1 : hpage_nr_pages(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	for (i = 0; i < nr; i++) {
		struct radix_tree_node *node;
		void **slot;

		__radix_tree_lookup(&mapping->page_tree, page->index + i,
				    &node, &slot);

		VM_BUG_ON_PAGE(!node && nr != 1, page);

		radix_tree_clear_tags(&mapping->page_tree, node, slot);
		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
				     workingset_update_node, mapping);
	}

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}
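/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * smp_wmb() above is only meaningful against a reader that checks the two
 * counters in the opposite order, roughly like this hypothetical helper.
 * With that pairing, a racing reader cannot observe nrpages == 0 and
 * nrexceptional == 0 while a shadow entry is being installed.
 */
static bool example_mapping_maybe_has_entries(struct address_space *mapping)
{
	if (mapping->nrpages)
		return true;
	/* Pairs with the smp_wmb() in page_cache_tree_delete(). */
	smp_rmb();
	return mapping->nrexceptional != 0;
}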
/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;
	int nr = hpage_nr_pages(page);

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (PageHuge(page))
		return;

	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else {
		VM_BUG_ON_PAGE(PageTransHuge(page), page);
	}

	/*
	 * At this point the page must be either written out or cleaned by
	 * truncate.  A dirty page here signals a bug and loss of unwritten
	 * data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for a truncated page and
	 * anyway will be cleared before the page is returned to the buddy
	 * allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}
EXPORT_SYMBOL(delete_from_page_cache);

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}
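/*
 * Illustrative sketch (hypothetical caller, not from this file): the two
 * helpers above differ in whether the error state is consumed.
 * filemap_check_errors() test-and-clears the flags, so a caller must hold
 * on to the result - a second call reports nothing.
 * filemap_check_and_keep_errors() only peeks.
 */
static int example_peek_then_consume(struct address_space *mapping)
{
	int peeked = filemap_check_and_keep_errors(mapping);	/* non-destructive */
	int err = filemap_check_errors(mapping);		/* clears the flags */

	/* Absent races, peeked == err, but only @err consumed the state. */
	return err ? err : peeked;
}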
/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:		address space within which to check
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 */
bool filemap_range_has_page(struct address_space *mapping,
			    loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct page *page;

	if (end_byte < start_byte)
		return false;

	if (mapping->nrpages == 0)
		return false;

	if (!find_get_pages_range(mapping, &index, end, 1, &page))
		return false;
	put_page(page);
	return true;
}
EXPORT_SYMBOL(filemap_range_has_page);
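/*
 * Illustrative sketch (an assumed caller, not from this file): a direct-I/O
 * write path can use filemap_range_has_page() to decide whether there are
 * cached pages it must write back and invalidate before issuing the direct
 * write.
 */
static bool example_dio_must_flush(struct file *file, loff_t pos, size_t count)
{
	return filemap_range_has_page(file->f_mapping, pos, pos + count - 1);
}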
static void __filemap_fdatawait_range(struct address_space *mapping,
				      loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;

	if (end_byte < start_byte)
		return;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			ClearPageError(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);
/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear the error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

static bool mapping_needs_writeback(struct address_space *mapping)
{
	return (!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional);
}

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned an error, the pages may have
		 * been written partially (e.g. -ENOSPC), so we wait for it.
		 * But -EIO is a special case: it may indicate that the worst
		 * thing (e.g. a bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);
			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);
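/*
 * Illustrative sketch: the canonical ->fsync() shape for a simple
 * filesystem with no metadata of its own to flush.  Real implementations
 * also sync the inode; this only shows the data side, built directly on
 * filemap_write_and_wait_range() above.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	/* Write back the dirty range, wait, and collect any error. */
	return filemap_write_and_wait_range(file->f_mapping, start, end);
}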
/**
 * file_check_and_advance_wb_err - report any previously-seen writeback error
 *				   and advance wb_err to the current value
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place.  If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion.  The error at this point must be reported via the proper channels
 * (fsync, the NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
					       &file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);
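/*
 * Illustrative sketch of the errseq_t protocol assumed above (see
 * lib/errseq.c): writers record errors with errseq_set(); each reader keeps
 * a private cursor and consumes a given error at most once.
 */
static int example_consume_wb_err(struct address_space *mapping,
				  errseq_t *cursor)
{
	if (!errseq_check(&mapping->wb_err, *cursor))
		return 0;	/* nothing new since our last look */

	/* Something changed: fetch the error and advance our cursor. */
	return errseq_check_and_advance(&mapping->wb_err, cursor);
}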
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		get_page(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL);
		error = page_cache_tree_insert(mapping, new, NULL);
		BUG_ON(error);

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_node_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_node_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		put_page(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_node_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return error;
}
/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache.  It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		if (!(gfp_mask & __GFP_WRITE) &&
		    shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
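/*
 * Illustrative sketch (an assumed caller, not from this file): how a read
 * path typically instantiates a missing page with the helpers above.  On
 * -EEXIST the caller simply retries the lookup, since another task won the
 * race and inserted a page first.
 */
static struct page *example_instantiate_page(struct address_space *mapping,
					     pgoff_t index)
{
	struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
	int err;

	if (!page)
		return ERR_PTR(-ENOMEM);

	err = add_to_page_cache_lru(page, mapping, index,
				    mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (err) {
		put_page(page);
		return ERR_PTR(err);
	}
	/* The page is locked; the caller would now issue ->readpage(). */
	return page;
}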
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&page_wait_table[i]);

	page_writeback_init();
}

/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};

static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (wait_page->page != key->page)
		return 0;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return 0;

	/* Stop walking if it's locked */
	if (test_bit(key->bit_nr, &key->page->flags))
		return -1;

	return autoremove_wake_function(wait, mode, sync, key);
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	struct wait_page_key key;
	unsigned long flags;

	key.page = page;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key(q, TASK_NORMAL, &key);
	/*
	 * It is possible for other pages to have collided on the waitqueue
	 * hash, so in that case check for a page match: only clear
	 * PG_waiters when no waiter for this page can remain on the queue.
	 * That keeps a long-term waiter from losing its wakeup.
	 *
	 * It is still possible to miss a case here, when we woke page
	 * waiters and removed them from the waitqueue, but there are still
	 * other page waiters.
	 */
	if (!waitqueue_active(q) || !key.page_match) {
		ClearPageWaiters(page);
		/*
		 * It's possible to miss clearing Waiters here, when we woke
		 * our page waiters, but the hashed waitqueue has waiters for
		 * other pages on it.
		 *
		 * That's okay, it's a rare case. The next waker will clear it.
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

static void wake_up_page(struct page *page, int bit)
{
	if (!PageWaiters(page))
		return;
	wake_up_page_bit(page, bit);
}
static inline int wait_on_page_bit_common(wait_queue_head_t *q,
		struct page *page, int bit_nr, int state, bool lock)
{
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	int ret = 0;

	init_wait(wait);
	wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0;
	wait->func = wake_page_function;
	wait_page.page = page;
	wait_page.bit_nr = bit_nr;

	for (;;) {
		spin_lock_irq(&q->lock);

		if (likely(list_empty(&wait->entry))) {
			__add_wait_queue_entry_tail(q, wait);
			SetPageWaiters(page);
		}

		set_current_state(state);

		spin_unlock_irq(&q->lock);

		if (likely(test_bit(bit_nr, &page->flags))) {
			io_schedule();
		}

		if (lock) {
			if (!test_and_set_bit_lock(bit_nr, &page->flags))
				break;
		} else {
			if (!test_bit(bit_nr, &page->flags))
				break;
		}

		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			break;
		}
	}

	finish_wait(q, wait);

	/*
	 * A signal could leave PageWaiters set. Clearing it here if
	 * !waitqueue_active would be possible (by open-coding finish_wait),
	 * but still fail to catch it in the case of wait hash collision. We
	 * already can fail to clear wait hash collision cases, so don't
	 * bother with signals either.
	 */

	return ret;
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
}
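/*
 * Illustrative sketch: wait_on_page_locked() in include/linux/pagemap.h is
 * essentially this wrapper around the bit-wait primitive above; the
 * PageLocked() test keeps the common already-unlocked case out of the
 * waitqueue machinery entirely.
 */
static inline void example_wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}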
/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, waiter);
	SetPageWaiters(page);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_lock.
 *
 * On x86 (and on many other architectures), we can clear PG_lock and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */
void unlock_page(struct page *page)
{
	BUILD_BUG_ON(PG_waiters != 7);
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
		wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
	if (!is_write) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else {
		if (err) {
			struct address_space *mapping;

			SetPageError(page);
			mapping = page_mapping(page);
			if (mapping)
				mapping_set_error(mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);
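/*
 * Illustrative sketch (modeled on fs/mpage.c): a bio completion handler
 * finishing per-page I/O with page_endio() above.
 */
static void example_end_bio(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		page_endio(bv->bv_page, op_is_write(bio_op(bio)),
			   blk_status_to_errno(bio->bi_status));

	bio_put(bio);
}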
/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @__page: the page to lock
 */
void __lock_page(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though we return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);
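/*
 * Illustrative sketch (modeled on mm/readahead.c): readahead sizing counts
 * how many pages are already cached behind @index by probing for the
 * previous hole.
 */
static pgoff_t example_count_history_pages(struct address_space *mapping,
					   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_hole(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}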
/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *head, *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			put_page(head);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page_mapping(page) != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);
/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: PCG flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * PCG flags modify how the page is returned.
 *
 * @fgp_flags can be:
 *
 * - FGP_ACCESSED: the page will be marked accessed
 * - FGP_LOCK: the page is returned locked
 * - FGP_CREAT: if the page is not present, a new page is allocated using
 *   @gfp_mask and added to the page cache and the VM's LRU list.
 *   The page is returned locked and with an increased refcount.
 *   Otherwise, NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
				int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				put_page(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
					    gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			put_page(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
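/*
 * Illustrative sketch: find_or_create_page() in include/linux/pagemap.h is
 * little more than this FGP spelling of pagecache_get_page().
 */
static inline struct page *example_find_or_create_page(
		struct address_space *mapping, pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  gfp_mask);
}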
/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}
/**
 * find_get_pages_range - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index (inclusive)
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_range() will search for and return a group of up to @nr_pages
 * pages in the mapping starting at index @start and up to index @end
 * (inclusive).  The pages are placed at @pages.  find_get_pages_range() takes
 * a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 * We also update @start to index the next page for the traversal.
 *
 * find_get_pages_range() returns the number of pages which were found. If this
 * number is smaller than @nr_pages, the end of specified range has been
 * reached.
 */
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			      pgoff_t end, unsigned int nr_pages,
			      struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, *start) {
		struct page *head, *page;

		if (iter.index > end)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages) {
			*start = pages[ret - 1]->index + 1;
			goto out;
		}
	}

	/*
	 * We come here when there is no page beyond @end. We take care to not
	 * overflow the index @start as it confuses some of the callers. This
	 * breaks the iteration when there is a page at index -1 but that is
	 * already broken anyway.
	 */
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
	else
		*start = end + 1;
out:
	rcu_read_unlock();

	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* A hole: there is no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
			put_page(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);
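/*
 * Illustrative sketch (an assumed caller, not from this file): gang lookups
 * are usually driven in PAGEVEC_SIZE batches, dropping the page references
 * via pagevec_release() between rounds, much as __filemap_fdatawait_range()
 * above does with the tagged variant.
 */
static void example_walk_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	unsigned int nr;

	pagevec_init(&pvec, 0);
	while ((nr = find_get_pages_range(mapping, &start, end,
					  PAGEVEC_SIZE, pvec.pages)) != 0) {
		pvec.nr = nr;
		/* ... process pvec.pages[0..nr-1] under their own locking ... */
		pagevec_release(&pvec);
		cond_resched();
	}
}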
/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a sizable window for page
			 * reclaim to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * find_get_entries_tag - find and return entries that match @tag
 * @mapping:	the address_space to search
 * @start:	the starting page cache index
 * @tag:	the tag index
 * @nr_entries:	the maximum number of entries
 * @entries:	where the resulting entries are placed
 * @indices:	the cache indices corresponding to the entries in @entries
 *
 * Like find_get_entries, except we only return entries which are tagged with
 * @tag.
 */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			      int tag, unsigned int nr_entries,
			      struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, start, tag) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_entries_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the I/O request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                              ^ bad block (assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
				      struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}
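/*
 * The quartering converges quickly: with the common 128kB (32-page)
 * default readahead window, repeated media errors shrink it as
 * 32 -> 8 -> 2 -> 0, so after a handful of failures readahead is
 * disabled entirely and only the blocks the reader actually asked
 * for are requested.
 */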
/**
 * do_generic_file_read - generic file read routine
 * @filp: the file to read
 * @ppos: current file position
 * @iter: data destination
 * @written: already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
		return 0;
	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);

	index = *ppos >> PAGE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		if (fatal_signal_pending(current)) {
			error = -EINTR;
			goto out;
		}

		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			/*
			 * See comment in do_read_cache_page on why
			 * wait_on_page_locked is used to avoid unnecessary
			 * serialisation and why it's safe.
			 */
			error = wait_on_page_locked_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (PageUptodate(page))
				goto page_ok;

			if (inode->i_blkbits == PAGE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			/* pipes can't handle partially uptodate pages */
			if (unlikely(iter->type & ITER_PIPE))
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size only after that lets us calculate the
		 * correct value for "nr", which means the zero-filled part of
		 * the page is not copied back to userspace (unless another
		 * truncate extends the file; that case is desired).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			put_page(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_MASK) + 1;
			if (nr <= offset) {
				put_page(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;
		prev_offset = offset;

		put_page(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				put_page(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					put_page(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		put_page(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page...
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (error) {
			put_page(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}
/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb: kernel I/O control block
 * @iter: destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	size_t count = iov_iter_count(iter);

	if (!count)
		goto out; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		loff_t size;

		size = i_size_read(inode);
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, iocb->ki_pos,
						   iocb->ki_pos + count - 1))
				return -EAGAIN;
		} else {
			retval = filemap_write_and_wait_range(mapping,
						iocb->ki_pos,
						iocb->ki_pos + count - 1);
			if (retval < 0)
				goto out;
		}

		file_accessed(file);

		retval = mapping->a_ops->direct_IO(iocb, iter);
		if (retval >= 0) {
			iocb->ki_pos += retval;
			count -= retval;
		}
		iov_iter_revert(iter, count - iov_iter_count(iter));

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return. Otherwise fall through to buffered I/O for
		 * the rest of the read. Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !count || iocb->ki_pos >= size ||
		    IS_DAX(inode))
			goto out;
	}

	retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
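/*
 * Example: filesystems either point ->read_iter straight at
 * generic_file_read_iter() or wrap it with their own checks first
 * (compare ext4_file_read_iter()). A sketch of the wrapper style; the
 * "myfs_*" names, including the shutdown test, are illustrative and
 * not kernel API:
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb,
 *					   struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (myfs_is_shut_down(inode->i_sb))
 *			return -EIO;
 *		return generic_file_read_iter(iocb, to);
 *	}
 *
 * The IOCB_DIRECT branch above is why such a filesystem still needs a
 * working ->direct_IO method if it accepts O_DIRECT opens.
 */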
#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file: file to read
 * @offset: page index
 * @gfp_mask: memory allocation flags
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing the race to add the page is OK */

		put_page(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than we hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further.
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vmf: struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
int filemap_fault(struct vm_fault *vmf)
{
	int error;
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	pgoff_t max_off;
	struct page *page;
	int ret = 0;

	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off))
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
		put_page(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under the page lock.
	 */
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off)) {
		unlock_page(page);
		put_page(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset, vmf->gfp_mask);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	put_page(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/*
	 * Things didn't work out. Declare failure to the mm layer with
	 * VM_FAULT_SIGBUS and back off the readahead window.
	 */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	pgoff_t last_pgoff = start_pgoff;
	unsigned long max_idx;
	struct page *head, *page;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
			start_pgoff) {
		if (iter.index > end_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			goto next;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (page->index >= max_idx)
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;

		vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
		if (vmf->pte)
			vmf->pte += iter.index - last_pgoff;
		last_pgoff = iter.index;
		if (alloc_set_pte(vmf, NULL, page))
			goto unlock;
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		put_page(page);
next:
		/* Huge page is mapped? No need to proceed. */
		if (pmd_trans_huge(*vmf->pmd))
			break;
		if (iter.index == end_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);
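/*
 * Example: filesystems that need their own write-fault handling reuse
 * the fault and map_pages helpers above and supply only page_mkwrite
 * themselves (compare ext4_file_vm_ops). A sketch with illustrative
 * "myfs" names:
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 *
 * where myfs_page_mkwrite() would typically reserve blocks for the
 * soon-to-be-dirtied page before delegating the page-cache work to
 * filemap_page_mkwrite() or open-coding the equivalent. The fully
 * generic version of this table follows.
 */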
const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
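/*
 * Example: a filesystem without ->writepage rejects shared writable
 * mappings but can still allow MAP_PRIVATE ones. A sketch of such an
 * ->mmap method (the "myfs" name is illustrative):
 *
 *	static int myfs_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return generic_file_readonly_mmap(file, vma);
 *	}
 *
 * mmap(PROT_WRITE, MAP_SHARED) then fails with -EINVAL, while private
 * copy-on-write mappings still work, because pages dirtied through
 * them become anonymous and never need ->writepage().
 */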
static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			put_page(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			put_page(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}

filler:
		err = filler(data, page);
		if (err < 0) {
			put_page(page);
			return ERR_PTR(err);
		}

		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
		goto out;
	}
	if (PageUptodate(page))
		goto out;

	/*
	 * The page is not up to date and may be locked due to one of the
	 * following cases:
	 * case a: the page is being filled and the page lock is held
	 * case b: a read/write error cleared the page's uptodate status
	 * case c: truncation is in progress (page locked)
	 * case d: reclaim is in progress
	 *
	 * Case a, the page will be up to date when the page is unlocked.
	 *	There is no need to serialise on the page lock here as the
	 *	page is pinned, so the lock gives no additional protection.
	 *	Even if the page is truncated, the data copied is still valid
	 *	if PageUptodate was set, as this is simply a read vs truncate
	 *	race.
	 * Case b, the page will not be up to date.
	 * Case c, the page may be truncated but in itself, the data may
	 *	still be valid after IO completes as it's a read vs truncate
	 *	race. The operation must restart if the page is not uptodate
	 *	on unlock, but otherwise serialising on the page lock to
	 *	stabilise the mapping gives no additional guarantees to the
	 *	caller as the page lock is released before return.
	 * Case d, similar to truncation. If reclaim holds the page lock, it
	 *	will be a race with remove_mapping that determines if the
	 *	mapping is valid on unlock, but otherwise the data is valid
	 *	and there is no need to serialise with the page lock.
	 *
	 * As the page lock gives no additional guarantee, we optimistically
	 * wait on the page to be unlocked and check if it's up to date and
	 * use the page if it is. Otherwise, the page lock is required to
	 * distinguish between the different cases. The motivation is that we
	 * avoid spurious serialisations and wakeups when multiple processes
	 * wait on the same page for IO to complete.
	 */
	wait_on_page_locked(page);
	if (PageUptodate(page))
		goto out;

	/* Distinguish between all the cases under the safety of the lock */
	lock_page(page);

	/* Case c or d, restart the operation */
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto repeat;
	}

	/* Someone else locked and filled the page in a very small window */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	goto filler;

out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping: the page's address_space
 * @index: the page index
 * @filler: function to perform the read
 * @data: first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
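/*
 * Example: the common way filesystems pull a metadata or directory page
 * through this path is read_mapping_page(), which supplies ->readpage
 * as the filler. A sketch with an illustrative "myfs" name:
 *
 *	static struct page *myfs_get_page(struct inode *dir, pgoff_t n)
 *	{
 *		struct page *page;
 *
 *		page = read_mapping_page(dir->i_mapping, n, NULL);
 *		if (IS_ERR(page))
 *			return page;
 *		kmap(page);
 *		return page;
 *	}
 *
 * The page comes back with an elevated refcount but unlocked (the
 * caller kunmap()s and put_page()s it when done); callers that need
 * stability against truncate must lock it and recheck page->mapping,
 * exactly as the fault path above does.
 */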
/*
 * Performs necessary checks before doing a write.
 *
 * Can adjust the writing position or the amount of bytes to write.
 * Returns the number of bytes that may be written, zero if nothing
 * should be written, or a negative error code that the caller should
 * return.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit?
	 *
	 * If we have written data it becomes a short write. If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus' frestrict idea would clean these up nicely.
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
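/*
 * Example: pagecache_write_begin/end bracket a copy into the page cache
 * when the caller is not fed by an iov_iter; page_symlink() in
 * fs/namei.c writes a symlink target this way. A condensed sketch of
 * that pattern (the "my_*" name is illustrative; error handling is
 * trimmed and @pos/@len are assumed to stay within one page):
 *
 *	static int my_write_kernel_buf(struct file *file, loff_t pos,
 *				       const char *buf, unsigned len)
 *	{
 *		struct address_space *mapping = file->f_mapping;
 *		struct page *page;
 *		void *fsdata, *kaddr;
 *		int err;
 *
 *		err = pagecache_write_begin(file, mapping, pos, len,
 *					    0, &page, &fsdata);
 *		if (err)
 *			return err;
 *		kaddr = kmap_atomic(page);
 *		memcpy(kaddr + offset_in_page(pos), buf, len);
 *		kunmap_atomic(kaddr);
 *		err = pagecache_write_end(file, mapping, pos, len, len,
 *					  page, fsdata);
 *		return err < 0 ? err : 0;
 *	}
 *
 * ->write_begin returns the page locked; ->write_end unlocks, dirties
 * and releases it, so the caller never touches the page afterwards.
 */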
ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_SHIFT;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* If there are pages that need writeback, bail out */
		if (filemap_range_has_page(inode->i_mapping, pos,
					   pos + iov_iter_count(from)))
			return -EAGAIN;
	} else {
		written = filemap_write_and_wait_range(mapping, pos,
							pos + write_len - 1);
		if (written)
			goto out;
	}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data. We invalidate clean cached pages from the region we're
	 * about to write. We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);
	/*
	 * If a page cannot be invalidated, return 0 to fall back
	 * to buffered write.
	 */
	if (written) {
		if (written == -EBUSY)
			return 0;
		goto out;
	}

	written = mapping->a_ops->direct_IO(iocb, from);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing. Either one is a pretty crazy thing to do,
	 * so we don't support it 100%. If this invalidation
	 * fails, tough, the write still worked...
	 */
	invalidate_inode_pages2_range(mapping,
				pos >> PAGE_SHIFT, end);

	if (written > 0) {
		pos += written;
		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
	iov_iter_revert(from, write_len - iov_iter_count(from));
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
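/*
 * Example: a ->write_begin implementation reduces to little more than
 * the helper above when the filesystem needs no block allocation;
 * compare simple_write_begin() in fs/libfs.c, sketched here with an
 * illustrative "my_*" name:
 *
 *	static int my_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		struct page *page;
 *		pgoff_t index = pos >> PAGE_SHIFT;
 *
 *		page = grab_cache_page_write_begin(mapping, index, flags);
 *		if (!page)
 *			return -ENOMEM;
 *		*pagep = page;
 *		return 0;
 *	}
 *
 * Real implementations additionally zero the parts of a !Uptodate page
 * that the write will not cover (simple_write_begin() does this for
 * partial writes) so that stale data never becomes readable.
 */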
ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);
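/*
 * Example: a buffered-only filesystem can drive the loop above directly
 * from its ->write_iter instead of going through
 * __generic_file_write_iter() below. A sketch ("myfs" is illustrative):
 *
 *	static ssize_t myfs_write_iter(struct kiocb *iocb,
 *				       struct iov_iter *from)
 *	{
 *		struct file *file = iocb->ki_filp;
 *		struct inode *inode = file_inode(file);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = generic_perform_write(file, from, iocb->ki_pos);
 *		inode_unlock(inode);
 *
 *		if (ret > 0) {
 *			iocb->ki_pos += ret;
 *			ret = generic_write_sync(iocb, ret);
 *		}
 *		return ret;
 *	}
 *
 * Note the ordering: ki_pos is advanced only after the copy loop
 * reports how much was written, and O_SYNC handling happens after the
 * inode lock is dropped, mirroring generic_file_write_iter() below.
 */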
/**
 * __generic_file_write_iter - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes. Some filesystems do this for writes to
		 * holes, for example. For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written.
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to release any data held against the page
 * (presumably at page->private). If the release was successful, return '1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
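/*
 * Example: taken together, the exported helpers in this file let a
 * simple filesystem implement regular-file I/O almost entirely by
 * delegation; compare ramfs_file_operations in fs/ramfs. A sketch
 * (the "myfs" name is illustrative):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.splice_read	= generic_file_splice_read,
 *		.fsync		= noop_fsync,
 *	};
 *
 * The filesystem then only needs address_space operations that can
 * fill (->readpage) and accept (->write_begin/->write_end) page-cache
 * pages; everything else (readahead, fault handling, dirty throttling,
 * O_SYNC semantics) lives in the generic code above.
 */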