// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <linux/migrate.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

#include "swap.h"

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995 Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem			(truncate_pagecache)
 *    ->private_lock			(__free_pte->block_dirty_folio)
 *      ->swap_lock			(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock			(acquired by fs in truncate path)
 *      ->i_mmap_rwsem			(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock			(filemap_fault)
 *      ->lock_page			(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem				(generic_perform_write)
 *    ->mmap_lock			(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock				(fs/fs-writeback.c)
 *    ->i_pages lock			(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock			(vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock			(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->i_pages lock			(try_to_unmap_one)
 *    ->lruvec->lru_lock		(follow_page_mask->mark_page_accessed)
 *    ->lruvec->lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    ->i_pages lock			(folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    ->inode->i_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    ->memcg->move_lock		(folio_remove_rmap_pte->folio_memcg_lock)
 *    bdi.wb->list_lock			(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->block_dirty_folio)
 */

static void mapping_set_update(struct xa_state *xas,
		struct address_space *mapping)
{
	if (dax_mapping(mapping) || shmem_mapping(mapping))
		return;
	xas_set_update(xas, workingset_update_node);
	xas_set_lru(xas, &shadow_nodes);
}

static void page_cache_delete(struct address_space *mapping,
				struct folio *folio, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);
	long nr = 1;

	mapping_set_update(&xas, mapping);

	xas_set_order(&xas, folio->index, folio_order(folio));
	nr = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	folio->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages -= nr;
}

static void filemap_unaccount_folio(struct address_space *mapping,
		struct folio *folio)
{
	long nr;

	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
			 current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
			int mapcount = folio_mapcount(folio);

			if (folio_ref_count(folio) >= mapcount + 2) {
				/*
				 * All vmas have already been torn down, so it's
				 * a good bet that actually the page is unmapped
				 * and we'd rather not leak it: if we're wrong,
				 * another bad page check should catch it later.
				 */
				atomic_set(&folio->_mapcount, -1);
				folio_ref_sub(folio, mapcount);
			}
		}
	}

	/* hugetlb folios do not participate in page cache accounting. */
	if (folio_test_hugetlb(folio))
		return;

	nr = folio_nr_pages(folio);

	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	if (folio_test_swapbacked(folio)) {
		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
		if (folio_test_pmd_mappable(folio))
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
	} else if (folio_test_pmd_mappable(folio)) {
		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
		filemap_nr_thps_dec(mapping);
	}

	/*
	 * At this point folio must be either written or cleaned by
	 * truncate. Dirty folio here signals a bug and loss of
	 * unwritten data - on ordinary filesystems.
	 *
	 * But it's harmless on in-memory filesystems like tmpfs; and can
	 * occur when a driver which did get_user_pages() sets page dirty
	 * before putting it, while the inode is being finally evicted.
	 *
	 * Below fixes dirty accounting after removing the folio entirely
	 * but leaves the dirty flag set: it has no effect for truncated
	 * folio and anyway will be cleared before returning folio to
	 * buddy allocator.
	 */
	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
			 mapping_can_writeback(mapping)))
		folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold the i_pages lock.
 */
void __filemap_remove_folio(struct folio *folio, void *shadow)
{
	struct address_space *mapping = folio->mapping;

	trace_mm_filemap_delete_from_page_cache(folio);
	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);
}

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
	void (*free_folio)(struct folio *);
	int refs = 1;

	free_folio = mapping->a_ops->free_folio;
	if (free_folio)
		free_folio(folio);

	if (folio_test_large(folio))
		refs = folio_nr_pages(folio);
	folio_put_refs(folio, refs);
}

/**
 * filemap_remove_folio - Remove folio from page cache.
 * @folio: The folio.
 *
 * This must be called only on folios that are locked and have been
 * verified to be in the page cache. It will never put the folio into
 * the free list because the caller has a reference on the page.
 */
void filemap_remove_folio(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
}
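
/*
 * Illustrative sketch (not part of the original file): how a truncate-like
 * caller might use filemap_remove_folio().  The function name is
 * hypothetical; the only requirements shown are the documented ones: the
 * folio is locked, still belongs to the page cache, and the caller holds
 * its own reference.
 *
 *	static void example_drop_folio(struct folio *folio)
 *	{
 *		folio_lock(folio);
 *		if (folio->mapping)
 *			filemap_remove_folio(folio);
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 */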

/*
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * @fbatch: batch of folios to delete
 *
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * by page index and is optimised for it to be dense.
 * It tolerates holes in @fbatch (mapping entries at those indices are not
 * modified).
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
	long total_pages = 0;
	int i = 0;
	struct folio *folio;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (i >= folio_batch_count(fbatch))
			break;

		/* A swap/dax/shadow entry got inserted? Skip it. */
		if (xa_is_value(folio))
			continue;
		/*
		 * A page got inserted in our range? Skip it. We have our
		 * pages locked so they are protected from being removed.
		 * If we see a page whose index is higher than ours, it
		 * means our page has been removed, which shouldn't be
		 * possible because we're holding the PageLock.
		 */
		if (folio != fbatch->folios[i]) {
			VM_BUG_ON_FOLIO(folio->index >
					fbatch->folios[i]->index, folio);
			continue;
		}

		WARN_ON_ONCE(!folio_test_locked(folio));

		folio->mapping = NULL;
		/* Leave folio->index set: truncation lookup relies on it */

		i++;
		xas_store(&xas, NULL);
		total_pages += folio_nr_pages(folio);
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch)
{
	int i;

	if (!folio_batch_count(fbatch))
		return;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		trace_mm_filemap_delete_from_page_cache(folio);
		filemap_unaccount_folio(mapping, folio);
	}
	page_cache_delete_batch(mapping, fbatch);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	for (i = 0; i < folio_batch_count(fbatch); i++)
		filemap_free_folio(mapping, fbatch->folios[i]);
}

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}
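
/*
 * Illustrative sketch (not part of the original file): the difference
 * between the two helpers above.  filemap_check_errors() uses
 * test_and_clear_bit(), so it consumes AS_EIO/AS_ENOSPC and a second call
 * would return 0; filemap_check_and_keep_errors() only tests the bits,
 * leaving them for a later fsync()-style caller to report.  The function
 * below is hypothetical:
 *
 *	static int example_flush_report_once(struct address_space *mapping)
 *	{
 *		int err = filemap_fdatawrite(mapping);
 *
 *		if (!err)
 *			err = filemap_check_errors(mapping);
 *		return err;
 *	}
 */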

/**
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @wbc: the writeback_control controlling the writeout
 *
 * Call writepages on the mapping using the provided wbc to control the
 * writeout.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	int ret;

	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);
	wbc_detach_inode(wbc);
	return ret;
}
EXPORT_SYMBOL(filemap_fdatawrite_wbc);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
		loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	return filemap_fdatawrite_wbc(mapping, &wbc);
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			    loff_t start_byte, loff_t end_byte)
{
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		folio = xas_find(&xas, max);
		if (xas_retry(&xas, folio))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(folio))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway. It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return folio != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);

static void __filemap_fdatawait_range(struct address_space *mapping,
				      loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned nr_folios;

	folio_batch_init(&fbatch);

	while (index <= end) {
		unsigned i;

		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				PAGECACHE_TAG_WRITEBACK, &fbatch);

		if (!nr_folios)
			break;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_wait_writeback(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them. Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them. Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves. Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file: file pointing to address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them. Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them. Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves. Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	return mapping->nrpages;
}

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct folio *folio;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	xas_for_each(&xas, folio, max) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
				folio_test_writeback(folio))
			break;
	}
	rcu_read_unlock();
	return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
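
/*
 * Illustrative sketch (not part of the original file): a data-integrity
 * flush of one byte range written out longhand with the helpers above.
 * This is roughly what filemap_write_and_wait_range() below does; the
 * function name is hypothetical.
 *
 *	static int example_sync_range(struct address_space *mapping,
 *				      loff_t start, loff_t end)
 *	{
 *		int err = filemap_fdatawrite_range(mapping, start, end);
 *		int err2;
 *
 *		if (err == -EIO)
 *			return err;
 *		err2 = filemap_fdatawait_range(mapping, start, end);
 *		return err ? err : err2;
 *	}
 */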

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0, err2;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = filemap_check_errors(mapping);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 *				   and advance wb_err to current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file: file pointing to address_space with pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);
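
/*
 * Illustrative sketch (not part of the original file): a minimal ->fsync()
 * for a simple filesystem could be built directly on the helper above,
 * since file_write_and_wait_range() both waits for the data and advances
 * the per-file wb_err cursor.  The function name is hypothetical, and a
 * real implementation would also flush metadata.
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		return file_write_and_wait_range(file, start, end);
 *	}
 */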

/**
 * replace_page_cache_folio - replace a pagecache folio with a new one
 * @old: folio to be replaced
 * @new: folio to replace with
 *
 * This function replaces a folio in the pagecache with a new one. On
 * success it acquires the pagecache reference for the new folio and
 * drops it for the old folio. Both the old and new folios must be
 * locked. This function does not add the new folio to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic. This function cannot fail.
 */
void replace_page_cache_folio(struct folio *old, struct folio *new)
{
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
	VM_BUG_ON_FOLIO(new->mapping, new);

	folio_get(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_replace_folio(old, new);

	xas_lock_irq(&xas);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!folio_test_hugetlb(old))
		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
	if (!folio_test_hugetlb(new))
		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
	if (folio_test_swapbacked(old))
		__lruvec_stat_sub_folio(old, NR_SHMEM);
	if (folio_test_swapbacked(new))
		__lruvec_stat_add_folio(new, NR_SHMEM);
	xas_unlock_irq(&xas);
	if (free_folio)
		free_folio(old);
	folio_put(old);
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);

noinline int __filemap_add_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *alloced_shadow = NULL;
	int alloced_order = 0;
	bool huge;
	long nr;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
	mapping_set_update(&xas, mapping);

	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
	xas_set_order(&xas, index, folio_order(folio));
	huge = folio_test_hugetlb(folio);
	nr = folio_nr_pages(folio);

	gfp &= GFP_RECLAIM_MASK;
	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = xas.xa_index;

	for (;;) {
		int order = -1, split_order = 0;
		void *entry, *old = NULL;

		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
			/*
			 * If a larger entry exists,
			 * it will be the first and only entry iterated.
			 */
			if (order == -1)
				order = xas_get_order(&xas);
		}

		/* entry may have changed before we re-acquire the lock */
		if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
			xas_destroy(&xas);
			alloced_order = 0;
		}

		if (old) {
			if (order > 0 && order > folio_order(folio)) {
				/* How to handle large swap entries? */
				BUG_ON(shmem_mapping(mapping));
				if (!alloced_order) {
					split_order = order;
					goto unlock;
				}
				xas_split(&xas, old, order);
				xas_reset(&xas);
			}
			if (shadowp)
				*shadowp = old;
		}

		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;

		mapping->nrpages += nr;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge) {
			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
			if (folio_test_pmd_mappable(folio))
				__lruvec_stat_mod_folio(folio,
						NR_FILE_THPS, nr);
		}

unlock:
		xas_unlock_irq(&xas);

		/* split needed, alloc here and retry. */
		if (split_order) {
			xas_split_alloc(&xas, old, split_order, gfp);
			if (xas_error(&xas))
				goto error;
			alloced_shadow = old;
			alloced_order = split_order;
			xas_reset(&xas);
			continue;
		}

		if (!xas_nomem(&xas, gfp))
			break;
	}

	if (xas_error(&xas))
		goto error;

	trace_mm_filemap_add_to_page_cache(folio);
	return 0;
error:
	folio->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	folio_put_refs(folio, nr);
	return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
				pgoff_t index, gfp_t gfp)
{
	void *shadow = NULL;
	int ret;

	ret = mem_cgroup_charge(folio, NULL, gfp);
	if (ret)
		return ret;

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret)) {
		mem_cgroup_uncharge(folio);
		__folio_clear_locked(folio);
	} else {
		/*
		 * The folio might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed folio.
		 * The exception is folios getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(folio_test_active(folio));
		if (!(gfp & __GFP_WRITE) && shadow)
			workingset_refault(folio, shadow);
		folio_add_lru(folio);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(filemap_add_folio);

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
	int n;
	struct folio *folio;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			folio = __folio_alloc_node_noprof(gfp, order, n);
		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));

		return folio;
	}
	return folio_alloc_noprof(gfp, order);
}
EXPORT_SYMBOL(filemap_alloc_folio_noprof);
#endif

/*
 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 *
 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to lock
 * @mapping2: the second mapping to lock
 */
void filemap_invalidate_lock_two(struct address_space *mapping1,
				 struct address_space *mapping2)
{
	if (mapping1 > mapping2)
		swap(mapping1, mapping2);
	if (mapping1)
		down_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		down_write_nested(&mapping2->invalidate_lock, 1);
}
EXPORT_SYMBOL(filemap_invalidate_lock_two);
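
/*
 * Illustrative sketch (not part of the original file): an operation that
 * touches the page cache of two inodes (e.g. a cross-file clone or
 * deduplication ioctl) can take both invalidate_locks with the helper
 * above; the consistent address-based ordering avoids ABBA deadlocks.
 * The function below is hypothetical:
 *
 *	static void example_remap_prepare(struct inode *src, struct inode *dst)
 *	{
 *		filemap_invalidate_lock_two(src->i_mapping, dst->i_mapping);
 *		... invalidate or copy the page cache of both files ...
 *		filemap_invalidate_unlock_two(src->i_mapping, dst->i_mapping);
 *	}
 */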

/*
 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 *
 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to unlock
 * @mapping2: the second mapping to unlock
 */
void filemap_invalidate_unlock_two(struct address_space *mapping1,
				   struct address_space *mapping2)
{
	if (mapping1)
		up_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		up_write(&mapping2->invalidate_lock);
}
EXPORT_SYMBOL(filemap_invalidate_unlock_two);

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *folio_waitqueue(struct folio *folio)
{
	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&folio_wait_table[i]);

	page_writeback_init();
}

/*
 * The page wait code treats the "wait->flags" somewhat unusually, because
 * we have multiple different kinds of waits, not just the usual "exclusive"
 * one.
 *
 * We have:
 *
 *  (a) no special bits set:
 *
 *	We're just waiting for the bit to be released, and when a waker
 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
 *	and remove it from the wait queue.
 *
 *	Simple and straightforward.
 *
 *  (b) WQ_FLAG_EXCLUSIVE:
 *
 *	The waiter is waiting to get the lock, and only one waiter should
 *	be woken up to avoid any thundering herd behavior. We'll set the
 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
 *
 *	This is the traditional exclusive wait.
 *
 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
 *
 *	The waiter is waiting to get the bit, and additionally wants the
 *	lock to be transferred to it for fair lock behavior. If the lock
 *	cannot be taken, we stop walking the wait queue without waking
 *	the waiter.
 *
 *	This is the "fair lock handoff" case, and in addition to setting
 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
 *	that it now has the lock.
 */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	unsigned int flags;
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wait_page, key))
		return 0;

	/*
	 * If it's a lock handoff wait, we get the bit for it, and
	 * stop walking (and do not wake it up) if we can't.
	 */
	flags = wait->flags;
	if (flags & WQ_FLAG_EXCLUSIVE) {
		if (test_bit(key->bit_nr, &key->folio->flags))
			return -1;
		if (flags & WQ_FLAG_CUSTOM) {
			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
				return -1;
			flags |= WQ_FLAG_DONE;
		}
	}

	/*
	 * We are holding the wait-queue lock, but the waiter that
	 * is waiting for this will be checking the flags without
	 * any locking.
	 *
	 * So update the flags atomically, and wake up the waiter
	 * afterwards to avoid any races. This store-release pairs
	 * with the load-acquire in folio_wait_bit_common().
	 */
	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
	wake_up_state(wait->private, mode);

	/*
	 * Ok, we have successfully done what we're waiting for,
	 * and we can unconditionally remove the wait entry.
	 *
	 * Note that this pairs with the "finish_wait()" in the
	 * waiter, and has to be the absolute last thing we do.
	 * After this list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	 * exited.
	 */
	list_del_init_careful(&wait->entry);
	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}

static void folio_wake_bit(struct folio *folio, int bit_nr)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	struct wait_page_key key;
	unsigned long flags;

	key.folio = folio;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key(q, TASK_NORMAL, &key);

	/*
	 * It's possible to miss clearing waiters here, when we woke our page
	 * waiters, but the hashed waitqueue has waiters for other pages on it.
	 * That's okay, it's a rare case. The next waker will clear it.
	 *
	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
	 * other), the flag may be cleared in the course of freeing the page;
	 * but that is not required for correctness.
	 */
	if (!waitqueue_active(q) || !key.page_match)
		folio_clear_waiters(folio);

	spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * A choice of three behaviors for folio_wait_bit_common():
 */
enum behavior {
	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
			 * __folio_lock() waiting on then setting PG_locked.
			 */
	SHARED,		/* Hold ref to page and check the bit when woken, like
			 * folio_wait_writeback() waiting on PG_writeback.
			 */
	DROP,		/* Drop ref to page before wait, no check when woken,
			 * like folio_put_wait_locked() on PG_locked.
			 */
};

/*
 * Attempt to check (or get) the folio flag, and mark us done
 * if successful.
 */
static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
					struct wait_queue_entry *wait)
{
	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
		if (test_and_set_bit(bit_nr, &folio->flags))
			return false;
	} else if (test_bit(bit_nr, &folio->flags))
		return false;

	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
	return true;
}

/* How many times do we accept lock stealing from under a waiter? */
int sysctl_page_lock_unfairness = 5;

static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
		int state, enum behavior behavior)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	int unfairness = sysctl_page_lock_unfairness;
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;

	if (bit_nr == PG_locked &&
	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = bit_nr;

repeat:
	wait->flags = 0;
	if (behavior == EXCLUSIVE) {
		wait->flags = WQ_FLAG_EXCLUSIVE;
		if (--unfairness < 0)
			wait->flags |= WQ_FLAG_CUSTOM;
	}

	/*
	 * Do one last check whether we can get the
	 * page bit synchronously.
	 *
	 * Do the folio_set_waiters() marking before that
	 * to let any waker we _just_ missed know they
	 * need to wake us up (otherwise they'll never
	 * even go to the slow case that looks at the
	 * page queue), and add ourselves to the wait
	 * queue if we need to sleep.
	 *
	 * This part needs to be done under the queue
	 * lock to avoid races.
	 */
	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, bit_nr, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * From now on, all the logic will be based on
	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
	 * see whether the page bit testing has already
	 * been done by the wake function.
	 *
	 * We can drop our reference to the folio.
	 */
	if (behavior == DROP)
		folio_put(folio);

	/*
	 * Note that until the "finish_wait()", or until
	 * we see the WQ_FLAG_WOKEN flag, we need to
	 * be very careful with the 'wait->flags', because
	 * we may race with a waker that sets them.
	 */
	for (;;) {
		unsigned int flags;

		set_current_state(state);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(state, current))
				break;

			io_schedule();
			continue;
		}

		/* If we were non-exclusive, we're done */
		if (behavior != EXCLUSIVE)
			break;

		/* If the waker got the lock for us, we're done */
		if (flags & WQ_FLAG_DONE)
			break;

		/*
		 * Otherwise, if we're getting the lock, we need to
		 * try to get it ourselves.
		 *
		 * And if that fails, we'll have to retry this all.
		 */
		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
			goto repeat;

		wait->flags |= WQ_FLAG_DONE;
		break;
	}

	/*
	 * If a signal happened, this 'finish_wait()' may remove the last
	 * waiter from the wait-queues, but the folio waiters bit will remain
	 * set. That's ok. The next wakeup will take care of it, and trying
	 * to do it here would be difficult and prone to races.
	 */
	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}

	/*
	 * NOTE! The wait->flags weren't stable until we've done the
	 * 'finish_wait()', and we could have exited the loop above due
	 * to a signal, and had a wakeup event happen after the signal
	 * test but before the 'finish_wait()'.
	 *
	 * So only after the finish_wait() can we reliably determine
	 * if we got woken up or not, so we can now figure out the final
	 * return value based on that state without races.
	 *
	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
	 */
	if (behavior == EXCLUSIVE)
		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;

	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}

#ifdef CONFIG_MIGRATION
/**
 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
 * @entry: migration swap entry.
 * @ptl: already locked ptl. This function will drop the lock.
 *
 * Wait for a migration entry referencing the given page to be removed. This is
 * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
 * this can be called without taking a reference on the page. Instead this
 * should be called while holding the ptl for the migration entry referencing
 * the page.
 *
 * Returns after unlocking the ptl.
 *
 * This follows the same logic as folio_wait_bit_common() so see the comments
 * there.
 */
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
	__releases(ptl)
{
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;
	wait_queue_head_t *q;
	struct folio *folio = pfn_swap_entry_folio(entry);

	q = folio_waitqueue(folio);
	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = PG_locked;
	wait->flags = 0;

	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, PG_locked, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * If a migration entry exists for the page the migration path must hold
	 * a valid reference to the page, and it must take the ptl to remove the
	 * migration entry. So the page is valid until the ptl is dropped.
	 */
	spin_unlock(ptl);

	for (;;) {
		unsigned int flags;

		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
				break;

			io_schedule();
			continue;
		}
		break;
	}

	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
}
#endif

void folio_wait_bit(struct folio *folio, int bit_nr)
{
	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit);

int folio_wait_bit_killable(struct folio *folio, int bit_nr)
{
	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit_killable);

/**
 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
 * @folio: The folio to wait for.
 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
 *
 * The caller should hold a reference on @folio. They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the folio to
 * come unlocked. After this function returns, the caller should not
 * dereference @folio.
 *
 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
 */
static int folio_put_wait_locked(struct folio *folio, int state)
{
	return folio_wait_bit_common(folio, PG_locked, state, DROP);
}

/**
 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
 * @folio: Folio defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, waiter);
	folio_set_waiters(folio);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(folio_add_wait_queue);

/**
 * folio_unlock - Unlock a locked folio.
 * @folio: The folio.
 *
 * Unlocks the folio and wakes up any thread sleeping on the page lock.
 *
 * Context: May be called from interrupt or process context. May not be
 * called from NMI context.
 */
void folio_unlock(struct folio *folio)
{
	/* Bit 7 allows x86 to check the byte's sign bit */
	BUILD_BUG_ON(PG_waiters != 7);
	BUILD_BUG_ON(PG_locked > 7);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
		folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_unlock);
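
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * around folio_lock()/folio_unlock() for a pagecache folio, re-checking
 * folio->mapping under the lock because the folio may have been truncated
 * while we slept.  The function name is hypothetical; on success the caller
 * is responsible for the later folio_unlock().
 *
 *	static bool example_lock_and_check(struct folio *folio,
 *					   struct address_space *mapping)
 *	{
 *		folio_lock(folio);
 *		if (folio->mapping != mapping) {
 *			folio_unlock(folio);
 *			return false;
 *		}
 *		return true;
 *	}
 */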

/**
 * folio_end_read - End read on a folio.
 * @folio: The folio.
 * @success: True if all reads completed successfully.
 *
 * When all reads against a folio have completed, filesystems should
 * call this function to let the pagecache know that no more reads
 * are outstanding. This will unlock the folio and wake up any thread
 * sleeping on the lock. The folio will also be marked uptodate if all
 * reads succeeded.
 *
 * Context: May be called from interrupt or process context. May not be
 * called from NMI context.
 */
void folio_end_read(struct folio *folio, bool success)
{
	unsigned long mask = 1 << PG_locked;

	/* Must be in bottom byte for x86 to work */
	BUILD_BUG_ON(PG_uptodate > 7);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	if (likely(success))
		mask |= 1 << PG_uptodate;
	if (folio_xor_flags_has_waiters(folio, mask))
		folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_end_read);

/**
 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
 * @folio: The folio.
 *
 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
 * it. The folio reference held for PG_private_2 being set is released.
 *
 * This is, for example, used when a netfs folio is being written to a local
 * disk cache, thereby allowing writes to the cache for the same folio to be
 * serialised.
 */
void folio_end_private_2(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
	folio_wake_bit(folio, PG_private_2);
	folio_put(folio);
}
EXPORT_SYMBOL(folio_end_private_2);

/**
 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 to be cleared on a folio.
 */
void folio_wait_private_2(struct folio *folio)
{
	while (folio_test_private_2(folio))
		folio_wait_bit(folio, PG_private_2);
}
EXPORT_SYMBOL(folio_wait_private_2);

/**
 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is
 * received by the calling task.
 *
 * Return:
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.
 */
int folio_wait_private_2_killable(struct folio *folio)
{
	int ret = 0;

	while (folio_test_private_2(folio)) {
		ret = folio_wait_bit_killable(folio, PG_private_2);
		if (ret < 0)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(folio_wait_private_2_killable);

/**
 * folio_end_writeback - End writeback against a folio.
 * @folio: The folio.
 *
 * The folio must actually be under writeback.
 *
 * Context: May be called from process or interrupt context.
 */
void folio_end_writeback(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);

	/*
	 * folio_test_clear_reclaim() could be used here but it is an
	 * atomic operation and overkill in this particular case. Failing
	 * to shuffle a folio marked for immediate reclaim is too mild
	 * a gain to justify taking an atomic operation penalty at the
	 * end of every folio writeback.
	 */
	if (folio_test_reclaim(folio)) {
		folio_clear_reclaim(folio);
		folio_rotate_reclaimable(folio);
	}

	/*
	 * Writeback does not hold a folio reference of its own, relying
	 * on truncation to wait for the clearing of PG_writeback.
	 * But here we must make sure that the folio is not freed and
	 * reused before the folio_wake_bit().
	 */
	folio_get(folio);
	if (__folio_end_writeback(folio))
		folio_wake_bit(folio, PG_writeback);
	acct_reclaim_writeback(folio);
	folio_put(folio);
}
EXPORT_SYMBOL(folio_end_writeback);

/**
 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
 * @folio: The folio to lock
 */
void __folio_lock(struct folio *folio)
{
	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
				EXCLUSIVE);
}
EXPORT_SYMBOL(__folio_lock);

int __folio_lock_killable(struct folio *folio)
{
	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
					EXCLUSIVE);
}
EXPORT_SYMBOL_GPL(__folio_lock_killable);

static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
{
	struct wait_queue_head *q = folio_waitqueue(folio);
	int ret;

	wait->folio = folio;
	wait->bit_nr = PG_locked;

	spin_lock_irq(&q->lock);
	__add_wait_queue_entry_tail(q, &wait->wait);
	folio_set_waiters(folio);
	ret = !folio_trylock(folio);
	/*
	 * If we were successful now, we know we're still on the
	 * waitqueue as we're still under the lock. This means it's
	 * safe to remove and return success, we know the callback
	 * isn't going to trigger.
	 */
	if (!ret)
		__remove_wait_queue(q, &wait->wait);
	else
		ret = -EIOCBQUEUED;
	spin_unlock_irq(&q->lock);
	return ret;
}

/*
 * Return values:
 * 0 - folio is locked.
 * non-zero - folio is not locked.
 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
 *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
 *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
 */
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
{
	unsigned int flags = vmf->flags;

	if (fault_flag_allow_retry_first(flags)) {
		/*
		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
		 * released even though returning VM_FAULT_RETRY.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		release_fault_lock(vmf);
		if (flags & FAULT_FLAG_KILLABLE)
			folio_wait_locked_killable(folio);
		else
			folio_wait_locked(folio);
		return VM_FAULT_RETRY;
	}
	if (flags & FAULT_FLAG_KILLABLE) {
		bool ret;

		ret = __folio_lock_killable(folio);
		if (ret) {
			release_fault_lock(vmf);
			return VM_FAULT_RETRY;
		}
	} else {
		__folio_lock(folio);
	}

	return 0;
}

/**
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * gap with the lowest index.
 *
 * This function may be called under the rcu_read_lock. However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 5, then subsequently a gap is
 * created at index 10, page_cache_next_miss covering both indices may
 * return 10 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.
 */
pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_next(&xas);
		if (!entry || xa_is_value(entry))
			return xas.xa_index;
		if (xas.xa_index == 0)
			return 0;
	}

	return index + max_scan;
}
EXPORT_SYMBOL(page_cache_next_miss);

/**
 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * gap with the highest index.
 *
 * This function may be called under the rcu_read_lock. However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 10, then subsequently a gap is
 * created at index 5, page_cache_prev_miss() covering both indices may
 * return 5 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.
 */
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_prev(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == ULONG_MAX)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_prev_miss);
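
/*
 * Illustrative sketch (not part of the original file): using
 * page_cache_next_miss() to find the first hole after @index, e.g. when
 * sizing a readahead window.  If the returned index is more than the scan
 * limit away from @index, no hole was found in the scanned range.  The
 * function name and the scan limit of 128 pages are hypothetical.
 *
 *	static pgoff_t example_first_hole(struct address_space *mapping,
 *					  pgoff_t index)
 *	{
 *		return page_cache_next_miss(mapping, index, 128);
 *	}
 */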
1830 */ 1831 void *filemap_get_entry(struct address_space *mapping, pgoff_t index) 1832 { 1833 XA_STATE(xas, &mapping->i_pages, index); 1834 struct folio *folio; 1835 1836 rcu_read_lock(); 1837 repeat: 1838 xas_reset(&xas); 1839 folio = xas_load(&xas); 1840 if (xas_retry(&xas, folio)) 1841 goto repeat; 1842 /* 1843 * A shadow entry of a recently evicted page, or a swap entry from 1844 * shmem/tmpfs. Return it without attempting to raise page count. 1845 */ 1846 if (!folio || xa_is_value(folio)) 1847 goto out; 1848 1849 if (!folio_try_get(folio)) 1850 goto repeat; 1851 1852 if (unlikely(folio != xas_reload(&xas))) { 1853 folio_put(folio); 1854 goto repeat; 1855 } 1856 out: 1857 rcu_read_unlock(); 1858 1859 return folio; 1860 } 1861 1862 /** 1863 * __filemap_get_folio - Find and get a reference to a folio. 1864 * @mapping: The address_space to search. 1865 * @index: The page index. 1866 * @fgp_flags: %FGP flags modify how the folio is returned. 1867 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified. 1868 * 1869 * Looks up the page cache entry at @mapping & @index. 1870 * 1871 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even 1872 * if the %GFP flags specified for %FGP_CREAT are atomic. 1873 * 1874 * If this function returns a folio, it is returned with an increased refcount. 1875 * 1876 * Return: The found folio or an ERR_PTR() otherwise. 1877 */ 1878 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, 1879 fgf_t fgp_flags, gfp_t gfp) 1880 { 1881 struct folio *folio; 1882 1883 repeat: 1884 folio = filemap_get_entry(mapping, index); 1885 if (xa_is_value(folio)) 1886 folio = NULL; 1887 if (!folio) 1888 goto no_page; 1889 1890 if (fgp_flags & FGP_LOCK) { 1891 if (fgp_flags & FGP_NOWAIT) { 1892 if (!folio_trylock(folio)) { 1893 folio_put(folio); 1894 return ERR_PTR(-EAGAIN); 1895 } 1896 } else { 1897 folio_lock(folio); 1898 } 1899 1900 /* Has the page been truncated? 
*/ 1901 if (unlikely(folio->mapping != mapping)) { 1902 folio_unlock(folio); 1903 folio_put(folio); 1904 goto repeat; 1905 } 1906 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); 1907 } 1908 1909 if (fgp_flags & FGP_ACCESSED) 1910 folio_mark_accessed(folio); 1911 else if (fgp_flags & FGP_WRITE) { 1912 /* Clear idle flag for buffer write */ 1913 if (folio_test_idle(folio)) 1914 folio_clear_idle(folio); 1915 } 1916 1917 if (fgp_flags & FGP_STABLE) 1918 folio_wait_stable(folio); 1919 no_page: 1920 if (!folio && (fgp_flags & FGP_CREAT)) { 1921 unsigned order = FGF_GET_ORDER(fgp_flags); 1922 int err; 1923 1924 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) 1925 gfp |= __GFP_WRITE; 1926 if (fgp_flags & FGP_NOFS) 1927 gfp &= ~__GFP_FS; 1928 if (fgp_flags & FGP_NOWAIT) { 1929 gfp &= ~GFP_KERNEL; 1930 gfp |= GFP_NOWAIT | __GFP_NOWARN; 1931 } 1932 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) 1933 fgp_flags |= FGP_LOCK; 1934 1935 if (!mapping_large_folio_support(mapping)) 1936 order = 0; 1937 if (order > MAX_PAGECACHE_ORDER) 1938 order = MAX_PAGECACHE_ORDER; 1939 /* If we're not aligned, allocate a smaller folio */ 1940 if (index & ((1UL << order) - 1)) 1941 order = __ffs(index); 1942 1943 do { 1944 gfp_t alloc_gfp = gfp; 1945 1946 err = -ENOMEM; 1947 if (order > 0) 1948 alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; 1949 folio = filemap_alloc_folio(alloc_gfp, order); 1950 if (!folio) 1951 continue; 1952 1953 /* Init accessed so avoid atomic mark_page_accessed later */ 1954 if (fgp_flags & FGP_ACCESSED) 1955 __folio_set_referenced(folio); 1956 1957 err = filemap_add_folio(mapping, folio, index, gfp); 1958 if (!err) 1959 break; 1960 folio_put(folio); 1961 folio = NULL; 1962 } while (order-- > 0); 1963 1964 if (err == -EEXIST) 1965 goto repeat; 1966 if (err) 1967 return ERR_PTR(err); 1968 /* 1969 * filemap_add_folio locks the page, and for mmap 1970 * we expect an unlocked page. 1971 */ 1972 if (folio && (fgp_flags & FGP_FOR_MMAP)) 1973 folio_unlock(folio); 1974 } 1975 1976 if (!folio) 1977 return ERR_PTR(-ENOENT); 1978 return folio; 1979 } 1980 EXPORT_SYMBOL(__filemap_get_folio); 1981 1982 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, 1983 xa_mark_t mark) 1984 { 1985 struct folio *folio; 1986 1987 retry: 1988 if (mark == XA_PRESENT) 1989 folio = xas_find(xas, max); 1990 else 1991 folio = xas_find_marked(xas, max, mark); 1992 1993 if (xas_retry(xas, folio)) 1994 goto retry; 1995 /* 1996 * A shadow entry of a recently evicted page, a swap 1997 * entry from shmem/tmpfs or a DAX entry. Return it 1998 * without attempting to raise page count. 1999 */ 2000 if (!folio || xa_is_value(folio)) 2001 return folio; 2002 2003 if (!folio_try_get(folio)) 2004 goto reset; 2005 2006 if (unlikely(folio != xas_reload(xas))) { 2007 folio_put(folio); 2008 goto reset; 2009 } 2010 2011 return folio; 2012 reset: 2013 xas_reset(xas); 2014 goto retry; 2015 } 2016 2017 /** 2018 * find_get_entries - gang pagecache lookup 2019 * @mapping: The address_space to search 2020 * @start: The starting page cache index 2021 * @end: The final page index (inclusive). 2022 * @fbatch: Where the resulting entries are placed. 2023 * @indices: The cache indices corresponding to the entries in @entries 2024 * 2025 * find_get_entries() will search for and return a batch of entries in 2026 * the mapping. The entries are placed in @fbatch. find_get_entries() 2027 * takes a reference on any actual folios it returns. 2028 * 2029 * The entries have ascending indexes. 
The indices may not be consecutive 2030 * due to not-present entries or large folios. 2031 * 2032 * Any shadow entries of evicted folios, or swap entries from 2033 * shmem/tmpfs, are included in the returned array. 2034 * 2035 * Return: The number of entries which were found. 2036 */ 2037 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, 2038 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) 2039 { 2040 XA_STATE(xas, &mapping->i_pages, *start); 2041 struct folio *folio; 2042 2043 rcu_read_lock(); 2044 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { 2045 indices[fbatch->nr] = xas.xa_index; 2046 if (!folio_batch_add(fbatch, folio)) 2047 break; 2048 } 2049 rcu_read_unlock(); 2050 2051 if (folio_batch_count(fbatch)) { 2052 unsigned long nr = 1; 2053 int idx = folio_batch_count(fbatch) - 1; 2054 2055 folio = fbatch->folios[idx]; 2056 if (!xa_is_value(folio)) 2057 nr = folio_nr_pages(folio); 2058 *start = indices[idx] + nr; 2059 } 2060 return folio_batch_count(fbatch); 2061 } 2062 2063 /** 2064 * find_lock_entries - Find a batch of pagecache entries. 2065 * @mapping: The address_space to search. 2066 * @start: The starting page cache index. 2067 * @end: The final page index (inclusive). 2068 * @fbatch: Where the resulting entries are placed. 2069 * @indices: The cache indices of the entries in @fbatch. 2070 * 2071 * find_lock_entries() will return a batch of entries from @mapping. 2072 * Swap, shadow and DAX entries are included. Folios are returned 2073 * locked and with an incremented refcount. Folios which are locked 2074 * by somebody else or under writeback are skipped. Folios which are 2075 * partially outside the range are not returned. 2076 * 2077 * The entries have ascending indexes. The indices may not be consecutive 2078 * due to not-present entries, large folios, folios which could not be 2079 * locked or folios under writeback. 2080 * 2081 * Return: The number of entries which were found. 2082 */ 2083 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, 2084 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) 2085 { 2086 XA_STATE(xas, &mapping->i_pages, *start); 2087 struct folio *folio; 2088 2089 rcu_read_lock(); 2090 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { 2091 if (!xa_is_value(folio)) { 2092 if (folio->index < *start) 2093 goto put; 2094 if (folio_next_index(folio) - 1 > end) 2095 goto put; 2096 if (!folio_trylock(folio)) 2097 goto put; 2098 if (folio->mapping != mapping || 2099 folio_test_writeback(folio)) 2100 goto unlock; 2101 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), 2102 folio); 2103 } 2104 indices[fbatch->nr] = xas.xa_index; 2105 if (!folio_batch_add(fbatch, folio)) 2106 break; 2107 continue; 2108 unlock: 2109 folio_unlock(folio); 2110 put: 2111 folio_put(folio); 2112 } 2113 rcu_read_unlock(); 2114 2115 if (folio_batch_count(fbatch)) { 2116 unsigned long nr = 1; 2117 int idx = folio_batch_count(fbatch) - 1; 2118 2119 folio = fbatch->folios[idx]; 2120 if (!xa_is_value(folio)) 2121 nr = folio_nr_pages(folio); 2122 *start = indices[idx] + nr; 2123 } 2124 return folio_batch_count(fbatch); 2125 } 2126 2127 /** 2128 * filemap_get_folios - Get a batch of folios 2129 * @mapping: The address_space to search 2130 * @start: The starting page index 2131 * @end: The final page index (inclusive) 2132 * @fbatch: The batch to fill. 2133 * 2134 * Search for and return a batch of folios in the mapping starting at 2135 * index @start and up to index @end (inclusive). 
The folios are returned 2136 * in @fbatch with an elevated reference count. 2137 * 2138 * Return: The number of folios which were found. 2139 * We also update @start to index the next folio for the traversal. 2140 */ 2141 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, 2142 pgoff_t end, struct folio_batch *fbatch) 2143 { 2144 return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch); 2145 } 2146 EXPORT_SYMBOL(filemap_get_folios); 2147 2148 /** 2149 * filemap_get_folios_contig - Get a batch of contiguous folios 2150 * @mapping: The address_space to search 2151 * @start: The starting page index 2152 * @end: The final page index (inclusive) 2153 * @fbatch: The batch to fill 2154 * 2155 * filemap_get_folios_contig() works exactly like filemap_get_folios(), 2156 * except the returned folios are guaranteed to be contiguous. This may 2157 * not return all contiguous folios if the batch gets filled up. 2158 * 2159 * Return: The number of folios found. 2160 * Also update @start to be positioned for traversal of the next folio. 2161 */ 2162 2163 unsigned filemap_get_folios_contig(struct address_space *mapping, 2164 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) 2165 { 2166 XA_STATE(xas, &mapping->i_pages, *start); 2167 unsigned long nr; 2168 struct folio *folio; 2169 2170 rcu_read_lock(); 2171 2172 for (folio = xas_load(&xas); folio && xas.xa_index <= end; 2173 folio = xas_next(&xas)) { 2174 if (xas_retry(&xas, folio)) 2175 continue; 2176 /* 2177 * If the entry has been swapped out, we can stop looking. 2178 * No current caller is looking for DAX entries. 2179 */ 2180 if (xa_is_value(folio)) 2181 goto update_start; 2182 2183 if (!folio_try_get(folio)) 2184 goto retry; 2185 2186 if (unlikely(folio != xas_reload(&xas))) 2187 goto put_folio; 2188 2189 if (!folio_batch_add(fbatch, folio)) { 2190 nr = folio_nr_pages(folio); 2191 *start = folio->index + nr; 2192 goto out; 2193 } 2194 continue; 2195 put_folio: 2196 folio_put(folio); 2197 2198 retry: 2199 xas_reset(&xas); 2200 } 2201 2202 update_start: 2203 nr = folio_batch_count(fbatch); 2204 2205 if (nr) { 2206 folio = fbatch->folios[nr - 1]; 2207 *start = folio_next_index(folio); 2208 } 2209 out: 2210 rcu_read_unlock(); 2211 return folio_batch_count(fbatch); 2212 } 2213 EXPORT_SYMBOL(filemap_get_folios_contig); 2214 2215 /** 2216 * filemap_get_folios_tag - Get a batch of folios matching @tag 2217 * @mapping: The address_space to search 2218 * @start: The starting page index 2219 * @end: The final page index (inclusive) 2220 * @tag: The tag index 2221 * @fbatch: The batch to fill 2222 * 2223 * The first folio may start before @start; if it does, it will contain 2224 * @start. The final folio may extend beyond @end; if it does, it will 2225 * contain @end. The folios have ascending indices. There may be gaps 2226 * between the folios if there are indices which have no folio in the 2227 * page cache. If folios are added to or removed from the page cache 2228 * while this is running, they may or may not be found by this call. 2229 * Only returns folios that are tagged with @tag. 2230 * 2231 * Return: The number of folios found. 2232 * Also update @start to index the next folio for traversal. 
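 *
 * A minimal sketch of a typical use (hypothetical caller walking the
 * dirty folios of a mapping; error handling omitted):
 *
 *	struct folio_batch fbatch;
 *	pgoff_t index = 0;
 *	unsigned int i;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
 *				      PAGECACHE_TAG_DIRTY, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++)
 *			;	// process fbatch.folios[i]
 *		folio_batch_release(&fbatch);	// drops the references taken
 *		cond_resched();
 *	}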
2233 */ 2234 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, 2235 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch) 2236 { 2237 XA_STATE(xas, &mapping->i_pages, *start); 2238 struct folio *folio; 2239 2240 rcu_read_lock(); 2241 while ((folio = find_get_entry(&xas, end, tag)) != NULL) { 2242 /* 2243 * Shadow entries should never be tagged, but this iteration 2244 * is lockless so there is a window for page reclaim to evict 2245 * a page we saw tagged. Skip over it. 2246 */ 2247 if (xa_is_value(folio)) 2248 continue; 2249 if (!folio_batch_add(fbatch, folio)) { 2250 unsigned long nr = folio_nr_pages(folio); 2251 *start = folio->index + nr; 2252 goto out; 2253 } 2254 } 2255 /* 2256 * We come here when there is no page beyond @end. We take care to not 2257 * overflow the index @start as it confuses some of the callers. This 2258 * breaks the iteration when there is a page at index -1 but that is 2259 * already broken anyway. 2260 */ 2261 if (end == (pgoff_t)-1) 2262 *start = (pgoff_t)-1; 2263 else 2264 *start = end + 1; 2265 out: 2266 rcu_read_unlock(); 2267 2268 return folio_batch_count(fbatch); 2269 } 2270 EXPORT_SYMBOL(filemap_get_folios_tag); 2271 2272 /* 2273 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 2274 * a _large_ part of the i/o request. Imagine the worst scenario: 2275 * 2276 * ---R__________________________________________B__________ 2277 * ^ reading here ^ bad block (assume 4k) 2278 * 2279 * read(R) => miss => readahead(R...B) => media error => frustrating retries 2280 * => failing the whole request => read(R) => read(R+1) => 2281 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 2282 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 2283 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 2284 * 2285 * It is going insane. Fix it by quickly scaling down the readahead size. 2286 */ 2287 static void shrink_readahead_size_eio(struct file_ra_state *ra) 2288 { 2289 ra->ra_pages /= 4; 2290 } 2291 2292 /* 2293 * filemap_get_read_batch - Get a batch of folios for read 2294 * 2295 * Get a batch of folios which represent a contiguous range of bytes in 2296 * the file. No exceptional entries will be returned. If @index is in 2297 * the middle of a folio, the entire folio will be returned. The last 2298 * folio in the batch may have the readahead flag set or the uptodate flag 2299 * clear so that the caller can take the appropriate action.
2300 */ 2301 static void filemap_get_read_batch(struct address_space *mapping, 2302 pgoff_t index, pgoff_t max, struct folio_batch *fbatch) 2303 { 2304 XA_STATE(xas, &mapping->i_pages, index); 2305 struct folio *folio; 2306 2307 rcu_read_lock(); 2308 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { 2309 if (xas_retry(&xas, folio)) 2310 continue; 2311 if (xas.xa_index > max || xa_is_value(folio)) 2312 break; 2313 if (xa_is_sibling(folio)) 2314 break; 2315 if (!folio_try_get(folio)) 2316 goto retry; 2317 2318 if (unlikely(folio != xas_reload(&xas))) 2319 goto put_folio; 2320 2321 if (!folio_batch_add(fbatch, folio)) 2322 break; 2323 if (!folio_test_uptodate(folio)) 2324 break; 2325 if (folio_test_readahead(folio)) 2326 break; 2327 xas_advance(&xas, folio_next_index(folio) - 1); 2328 continue; 2329 put_folio: 2330 folio_put(folio); 2331 retry: 2332 xas_reset(&xas); 2333 } 2334 rcu_read_unlock(); 2335 } 2336 2337 static int filemap_read_folio(struct file *file, filler_t filler, 2338 struct folio *folio) 2339 { 2340 bool workingset = folio_test_workingset(folio); 2341 unsigned long pflags; 2342 int error; 2343 2344 /* Start the actual read. The read will unlock the page. */ 2345 if (unlikely(workingset)) 2346 psi_memstall_enter(&pflags); 2347 error = filler(file, folio); 2348 if (unlikely(workingset)) 2349 psi_memstall_leave(&pflags); 2350 if (error) 2351 return error; 2352 2353 error = folio_wait_locked_killable(folio); 2354 if (error) 2355 return error; 2356 if (folio_test_uptodate(folio)) 2357 return 0; 2358 if (file) 2359 shrink_readahead_size_eio(&file->f_ra); 2360 return -EIO; 2361 } 2362 2363 static bool filemap_range_uptodate(struct address_space *mapping, 2364 loff_t pos, size_t count, struct folio *folio, 2365 bool need_uptodate) 2366 { 2367 if (folio_test_uptodate(folio)) 2368 return true; 2369 /* pipes can't handle partially uptodate pages */ 2370 if (need_uptodate) 2371 return false; 2372 if (!mapping->a_ops->is_partially_uptodate) 2373 return false; 2374 if (mapping->host->i_blkbits >= folio_shift(folio)) 2375 return false; 2376 2377 if (folio_pos(folio) > pos) { 2378 count -= folio_pos(folio) - pos; 2379 pos = 0; 2380 } else { 2381 pos -= folio_pos(folio); 2382 } 2383 2384 return mapping->a_ops->is_partially_uptodate(folio, pos, count); 2385 } 2386 2387 static int filemap_update_page(struct kiocb *iocb, 2388 struct address_space *mapping, size_t count, 2389 struct folio *folio, bool need_uptodate) 2390 { 2391 int error; 2392 2393 if (iocb->ki_flags & IOCB_NOWAIT) { 2394 if (!filemap_invalidate_trylock_shared(mapping)) 2395 return -EAGAIN; 2396 } else { 2397 filemap_invalidate_lock_shared(mapping); 2398 } 2399 2400 if (!folio_trylock(folio)) { 2401 error = -EAGAIN; 2402 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) 2403 goto unlock_mapping; 2404 if (!(iocb->ki_flags & IOCB_WAITQ)) { 2405 filemap_invalidate_unlock_shared(mapping); 2406 /* 2407 * This is where we usually end up waiting for a 2408 * previously submitted readahead to finish. 
2409 */ 2410 folio_put_wait_locked(folio, TASK_KILLABLE); 2411 return AOP_TRUNCATED_PAGE; 2412 } 2413 error = __folio_lock_async(folio, iocb->ki_waitq); 2414 if (error) 2415 goto unlock_mapping; 2416 } 2417 2418 error = AOP_TRUNCATED_PAGE; 2419 if (!folio->mapping) 2420 goto unlock; 2421 2422 error = 0; 2423 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, 2424 need_uptodate)) 2425 goto unlock; 2426 2427 error = -EAGAIN; 2428 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) 2429 goto unlock; 2430 2431 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, 2432 folio); 2433 goto unlock_mapping; 2434 unlock: 2435 folio_unlock(folio); 2436 unlock_mapping: 2437 filemap_invalidate_unlock_shared(mapping); 2438 if (error == AOP_TRUNCATED_PAGE) 2439 folio_put(folio); 2440 return error; 2441 } 2442 2443 static int filemap_create_folio(struct file *file, 2444 struct address_space *mapping, pgoff_t index, 2445 struct folio_batch *fbatch) 2446 { 2447 struct folio *folio; 2448 int error; 2449 2450 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); 2451 if (!folio) 2452 return -ENOMEM; 2453 2454 /* 2455 * Protect against truncate / hole punch. Grabbing invalidate_lock 2456 * here assures we cannot instantiate and bring uptodate new 2457 * pagecache folios after evicting page cache during truncate 2458 * and before actually freeing blocks. Note that we could 2459 * release invalidate_lock after inserting the folio into 2460 * the page cache as the locked folio would then be enough to 2461 * synchronize with hole punching. But there are code paths 2462 * such as filemap_update_page() filling in partially uptodate 2463 * pages or ->readahead() that need to hold invalidate_lock 2464 * while mapping blocks for IO so let's hold the lock here as 2465 * well to keep locking rules simple. 
2466 */ 2467 filemap_invalidate_lock_shared(mapping); 2468 error = filemap_add_folio(mapping, folio, index, 2469 mapping_gfp_constraint(mapping, GFP_KERNEL)); 2470 if (error == -EEXIST) 2471 error = AOP_TRUNCATED_PAGE; 2472 if (error) 2473 goto error; 2474 2475 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); 2476 if (error) 2477 goto error; 2478 2479 filemap_invalidate_unlock_shared(mapping); 2480 folio_batch_add(fbatch, folio); 2481 return 0; 2482 error: 2483 filemap_invalidate_unlock_shared(mapping); 2484 folio_put(folio); 2485 return error; 2486 } 2487 2488 static int filemap_readahead(struct kiocb *iocb, struct file *file, 2489 struct address_space *mapping, struct folio *folio, 2490 pgoff_t last_index) 2491 { 2492 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); 2493 2494 if (iocb->ki_flags & IOCB_NOIO) 2495 return -EAGAIN; 2496 page_cache_async_ra(&ractl, folio, last_index - folio->index); 2497 return 0; 2498 } 2499 2500 static int filemap_get_pages(struct kiocb *iocb, size_t count, 2501 struct folio_batch *fbatch, bool need_uptodate) 2502 { 2503 struct file *filp = iocb->ki_filp; 2504 struct address_space *mapping = filp->f_mapping; 2505 struct file_ra_state *ra = &filp->f_ra; 2506 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; 2507 pgoff_t last_index; 2508 struct folio *folio; 2509 int err = 0; 2510 2511 /* "last_index" is the index of the page beyond the end of the read */ 2512 last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE); 2513 retry: 2514 if (fatal_signal_pending(current)) 2515 return -EINTR; 2516 2517 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); 2518 if (!folio_batch_count(fbatch)) { 2519 if (iocb->ki_flags & IOCB_NOIO) 2520 return -EAGAIN; 2521 page_cache_sync_readahead(mapping, ra, filp, index, 2522 last_index - index); 2523 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); 2524 } 2525 if (!folio_batch_count(fbatch)) { 2526 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) 2527 return -EAGAIN; 2528 err = filemap_create_folio(filp, mapping, 2529 iocb->ki_pos >> PAGE_SHIFT, fbatch); 2530 if (err == AOP_TRUNCATED_PAGE) 2531 goto retry; 2532 return err; 2533 } 2534 2535 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; 2536 if (folio_test_readahead(folio)) { 2537 err = filemap_readahead(iocb, filp, mapping, folio, last_index); 2538 if (err) 2539 goto err; 2540 } 2541 if (!folio_test_uptodate(folio)) { 2542 if ((iocb->ki_flags & IOCB_WAITQ) && 2543 folio_batch_count(fbatch) > 1) 2544 iocb->ki_flags |= IOCB_NOWAIT; 2545 err = filemap_update_page(iocb, mapping, count, folio, 2546 need_uptodate); 2547 if (err) 2548 goto err; 2549 } 2550 2551 trace_mm_filemap_get_pages(mapping, index, last_index); 2552 return 0; 2553 err: 2554 if (err < 0) 2555 folio_put(folio); 2556 if (likely(--fbatch->nr)) 2557 return 0; 2558 if (err == AOP_TRUNCATED_PAGE) 2559 goto retry; 2560 return err; 2561 } 2562 2563 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) 2564 { 2565 unsigned int shift = folio_shift(folio); 2566 2567 return (pos1 >> shift == pos2 >> shift); 2568 } 2569 2570 /** 2571 * filemap_read - Read data from the page cache. 2572 * @iocb: The iocb to read. 2573 * @iter: Destination for the data. 2574 * @already_read: Number of bytes already read by the caller. 2575 * 2576 * Copies data from the page cache. If the data is not currently present, 2577 * uses the readahead and read_folio address_space operations to fetch it. 
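 *
 * A minimal sketch of a ->read_iter() built directly on this helper
 * (hypothetical filesystem with no direct I/O or other special casing;
 * most filesystems simply use generic_file_read_iter() instead):
 *
 *	static ssize_t foo_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		return filemap_read(iocb, to, 0);
 *	}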
2578 * 2579 * Return: Total number of bytes copied, including those already read by 2580 * the caller. If an error happens before any bytes are copied, returns 2581 * a negative error number. 2582 */ 2583 ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, 2584 ssize_t already_read) 2585 { 2586 struct file *filp = iocb->ki_filp; 2587 struct file_ra_state *ra = &filp->f_ra; 2588 struct address_space *mapping = filp->f_mapping; 2589 struct inode *inode = mapping->host; 2590 struct folio_batch fbatch; 2591 int i, error = 0; 2592 bool writably_mapped; 2593 loff_t isize, end_offset; 2594 loff_t last_pos = ra->prev_pos; 2595 2596 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) 2597 return 0; 2598 if (unlikely(!iov_iter_count(iter))) 2599 return 0; 2600 2601 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); 2602 folio_batch_init(&fbatch); 2603 2604 do { 2605 cond_resched(); 2606 2607 /* 2608 * If we've already successfully copied some data, then we 2609 * can no longer safely return -EIOCBQUEUED. Hence mark 2610 * an async read NOWAIT at that point. 2611 */ 2612 if ((iocb->ki_flags & IOCB_WAITQ) && already_read) 2613 iocb->ki_flags |= IOCB_NOWAIT; 2614 2615 if (unlikely(iocb->ki_pos >= i_size_read(inode))) 2616 break; 2617 2618 error = filemap_get_pages(iocb, iter->count, &fbatch, false); 2619 if (error < 0) 2620 break; 2621 2622 /* 2623 * i_size must be checked after we know the pages are Uptodate. 2624 * 2625 * Checking i_size after the check allows us to calculate 2626 * the correct value for "nr", which means the zero-filled 2627 * part of the page is not copied back to userspace (unless 2628 * another truncate extends the file - this is desired though). 2629 */ 2630 isize = i_size_read(inode); 2631 if (unlikely(iocb->ki_pos >= isize)) 2632 goto put_folios; 2633 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); 2634 2635 /* 2636 * Once we start copying data, we don't want to be touching any 2637 * cachelines that might be contended: 2638 */ 2639 writably_mapped = mapping_writably_mapped(mapping); 2640 2641 /* 2642 * When a read accesses the same folio several times, only 2643 * mark it as accessed the first time. 2644 */ 2645 if (!pos_same_folio(iocb->ki_pos, last_pos - 1, 2646 fbatch.folios[0])) 2647 folio_mark_accessed(fbatch.folios[0]); 2648 2649 for (i = 0; i < folio_batch_count(&fbatch); i++) { 2650 struct folio *folio = fbatch.folios[i]; 2651 size_t fsize = folio_size(folio); 2652 size_t offset = iocb->ki_pos & (fsize - 1); 2653 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, 2654 fsize - offset); 2655 size_t copied; 2656 2657 if (end_offset < folio_pos(folio)) 2658 break; 2659 if (i > 0) 2660 folio_mark_accessed(folio); 2661 /* 2662 * If users can be writing to this folio using arbitrary 2663 * virtual addresses, take care of potential aliasing 2664 * before reading the folio on the kernel side. 2665 */ 2666 if (writably_mapped) 2667 flush_dcache_folio(folio); 2668 2669 copied = copy_folio_to_iter(folio, offset, bytes, iter); 2670 2671 already_read += copied; 2672 iocb->ki_pos += copied; 2673 last_pos = iocb->ki_pos; 2674 2675 if (copied < bytes) { 2676 error = -EFAULT; 2677 break; 2678 } 2679 } 2680 put_folios: 2681 for (i = 0; i < folio_batch_count(&fbatch); i++) 2682 folio_put(fbatch.folios[i]); 2683 folio_batch_init(&fbatch); 2684 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); 2685 2686 file_accessed(filp); 2687 ra->prev_pos = last_pos; 2688 return already_read ? 
already_read : error; 2689 } 2690 EXPORT_SYMBOL_GPL(filemap_read); 2691 2692 int kiocb_write_and_wait(struct kiocb *iocb, size_t count) 2693 { 2694 struct address_space *mapping = iocb->ki_filp->f_mapping; 2695 loff_t pos = iocb->ki_pos; 2696 loff_t end = pos + count - 1; 2697 2698 if (iocb->ki_flags & IOCB_NOWAIT) { 2699 if (filemap_range_needs_writeback(mapping, pos, end)) 2700 return -EAGAIN; 2701 return 0; 2702 } 2703 2704 return filemap_write_and_wait_range(mapping, pos, end); 2705 } 2706 EXPORT_SYMBOL_GPL(kiocb_write_and_wait); 2707 2708 int kiocb_invalidate_pages(struct kiocb *iocb, size_t count) 2709 { 2710 struct address_space *mapping = iocb->ki_filp->f_mapping; 2711 loff_t pos = iocb->ki_pos; 2712 loff_t end = pos + count - 1; 2713 int ret; 2714 2715 if (iocb->ki_flags & IOCB_NOWAIT) { 2716 /* we could block if there are any pages in the range */ 2717 if (filemap_range_has_page(mapping, pos, end)) 2718 return -EAGAIN; 2719 } else { 2720 ret = filemap_write_and_wait_range(mapping, pos, end); 2721 if (ret) 2722 return ret; 2723 } 2724 2725 /* 2726 * After a write we want buffered reads to be sure to go to disk to get 2727 * the new data. We invalidate clean cached page from the region we're 2728 * about to write. We do this *before* the write so that we can return 2729 * without clobbering -EIOCBQUEUED from ->direct_IO(). 2730 */ 2731 return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, 2732 end >> PAGE_SHIFT); 2733 } 2734 EXPORT_SYMBOL_GPL(kiocb_invalidate_pages); 2735 2736 /** 2737 * generic_file_read_iter - generic filesystem read routine 2738 * @iocb: kernel I/O control block 2739 * @iter: destination for the data read 2740 * 2741 * This is the "read_iter()" routine for all filesystems 2742 * that can use the page cache directly. 2743 * 2744 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall 2745 * be returned when no data can be read without waiting for I/O requests 2746 * to complete; it doesn't prevent readahead. 2747 * 2748 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O 2749 * requests shall be made for the read or for readahead. When no data 2750 * can be read, -EAGAIN shall be returned. When readahead would be 2751 * triggered, a partial, possibly empty read shall be returned. 2752 * 2753 * Return: 2754 * * number of bytes copied, even for partial reads 2755 * * negative error code (or 0 if IOCB_NOIO) if nothing was read 2756 */ 2757 ssize_t 2758 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2759 { 2760 size_t count = iov_iter_count(iter); 2761 ssize_t retval = 0; 2762 2763 if (!count) 2764 return 0; /* skip atime */ 2765 2766 if (iocb->ki_flags & IOCB_DIRECT) { 2767 struct file *file = iocb->ki_filp; 2768 struct address_space *mapping = file->f_mapping; 2769 struct inode *inode = mapping->host; 2770 2771 retval = kiocb_write_and_wait(iocb, count); 2772 if (retval < 0) 2773 return retval; 2774 file_accessed(file); 2775 2776 retval = mapping->a_ops->direct_IO(iocb, iter); 2777 if (retval >= 0) { 2778 iocb->ki_pos += retval; 2779 count -= retval; 2780 } 2781 if (retval != -EIOCBQUEUED) 2782 iov_iter_revert(iter, count - iov_iter_count(iter)); 2783 2784 /* 2785 * Btrfs can have a short DIO read if we encounter 2786 * compressed extents, so if there was an error, or if 2787 * we've already read everything we wanted to, or if 2788 * there was a short read because we hit EOF, go ahead 2789 * and return. Otherwise fallthrough to buffered io for 2790 * the rest of the read. 
Buffered reads will not work for 2791 * DAX files, so don't bother trying. 2792 */ 2793 if (retval < 0 || !count || IS_DAX(inode)) 2794 return retval; 2795 if (iocb->ki_pos >= i_size_read(inode)) 2796 return retval; 2797 } 2798 2799 return filemap_read(iocb, iter, retval); 2800 } 2801 EXPORT_SYMBOL(generic_file_read_iter); 2802 2803 /* 2804 * Splice subpages from a folio into a pipe. 2805 */ 2806 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe, 2807 struct folio *folio, loff_t fpos, size_t size) 2808 { 2809 struct page *page; 2810 size_t spliced = 0, offset = offset_in_folio(folio, fpos); 2811 2812 page = folio_page(folio, offset / PAGE_SIZE); 2813 size = min(size, folio_size(folio) - offset); 2814 offset %= PAGE_SIZE; 2815 2816 while (spliced < size && 2817 !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { 2818 struct pipe_buffer *buf = pipe_head_buf(pipe); 2819 size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced); 2820 2821 *buf = (struct pipe_buffer) { 2822 .ops = &page_cache_pipe_buf_ops, 2823 .page = page, 2824 .offset = offset, 2825 .len = part, 2826 }; 2827 folio_get(folio); 2828 pipe->head++; 2829 page++; 2830 spliced += part; 2831 offset = 0; 2832 } 2833 2834 return spliced; 2835 } 2836 2837 /** 2838 * filemap_splice_read - Splice data from a file's pagecache into a pipe 2839 * @in: The file to read from 2840 * @ppos: Pointer to the file position to read from 2841 * @pipe: The pipe to splice into 2842 * @len: The amount to splice 2843 * @flags: The SPLICE_F_* flags 2844 * 2845 * This function gets folios from a file's pagecache and splices them into the 2846 * pipe. Readahead will be called as necessary to fill more folios. This may 2847 * be used for blockdevs also. 2848 * 2849 * Return: On success, the number of bytes read will be returned and *@ppos 2850 * will be updated if appropriate; 0 will be returned if there is no more data 2851 * to be read; -EAGAIN will be returned if the pipe had no space, and some 2852 * other negative error code will be returned on error. A short read may occur 2853 * if the pipe has insufficient space, we reach the end of the data or we hit a 2854 * hole. 2855 */ 2856 ssize_t filemap_splice_read(struct file *in, loff_t *ppos, 2857 struct pipe_inode_info *pipe, 2858 size_t len, unsigned int flags) 2859 { 2860 struct folio_batch fbatch; 2861 struct kiocb iocb; 2862 size_t total_spliced = 0, used, npages; 2863 loff_t isize, end_offset; 2864 bool writably_mapped; 2865 int i, error = 0; 2866 2867 if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes)) 2868 return 0; 2869 2870 init_sync_kiocb(&iocb, in); 2871 iocb.ki_pos = *ppos; 2872 2873 /* Work out how much data we can actually add into the pipe */ 2874 used = pipe_occupancy(pipe->head, pipe->tail); 2875 npages = max_t(ssize_t, pipe->max_usage - used, 0); 2876 len = min_t(size_t, len, npages * PAGE_SIZE); 2877 2878 folio_batch_init(&fbatch); 2879 2880 do { 2881 cond_resched(); 2882 2883 if (*ppos >= i_size_read(in->f_mapping->host)) 2884 break; 2885 2886 iocb.ki_pos = *ppos; 2887 error = filemap_get_pages(&iocb, len, &fbatch, true); 2888 if (error < 0) 2889 break; 2890 2891 /* 2892 * i_size must be checked after we know the pages are Uptodate. 2893 * 2894 * Checking i_size after the check allows us to calculate 2895 * the correct value for "nr", which means the zero-filled 2896 * part of the page is not copied back to userspace (unless 2897 * another truncate extends the file - this is desired though). 
2898 */ 2899 isize = i_size_read(in->f_mapping->host); 2900 if (unlikely(*ppos >= isize)) 2901 break; 2902 end_offset = min_t(loff_t, isize, *ppos + len); 2903 2904 /* 2905 * Once we start copying data, we don't want to be touching any 2906 * cachelines that might be contended: 2907 */ 2908 writably_mapped = mapping_writably_mapped(in->f_mapping); 2909 2910 for (i = 0; i < folio_batch_count(&fbatch); i++) { 2911 struct folio *folio = fbatch.folios[i]; 2912 size_t n; 2913 2914 if (folio_pos(folio) >= end_offset) 2915 goto out; 2916 folio_mark_accessed(folio); 2917 2918 /* 2919 * If users can be writing to this folio using arbitrary 2920 * virtual addresses, take care of potential aliasing 2921 * before reading the folio on the kernel side. 2922 */ 2923 if (writably_mapped) 2924 flush_dcache_folio(folio); 2925 2926 n = min_t(loff_t, len, isize - *ppos); 2927 n = splice_folio_into_pipe(pipe, folio, *ppos, n); 2928 if (!n) 2929 goto out; 2930 len -= n; 2931 total_spliced += n; 2932 *ppos += n; 2933 in->f_ra.prev_pos = *ppos; 2934 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 2935 goto out; 2936 } 2937 2938 folio_batch_release(&fbatch); 2939 } while (len); 2940 2941 out: 2942 folio_batch_release(&fbatch); 2943 file_accessed(in); 2944 2945 return total_spliced ? total_spliced : error; 2946 } 2947 EXPORT_SYMBOL(filemap_splice_read); 2948 2949 static inline loff_t folio_seek_hole_data(struct xa_state *xas, 2950 struct address_space *mapping, struct folio *folio, 2951 loff_t start, loff_t end, bool seek_data) 2952 { 2953 const struct address_space_operations *ops = mapping->a_ops; 2954 size_t offset, bsz = i_blocksize(mapping->host); 2955 2956 if (xa_is_value(folio) || folio_test_uptodate(folio)) 2957 return seek_data ? start : end; 2958 if (!ops->is_partially_uptodate) 2959 return seek_data ? end : start; 2960 2961 xas_pause(xas); 2962 rcu_read_unlock(); 2963 folio_lock(folio); 2964 if (unlikely(folio->mapping != mapping)) 2965 goto unlock; 2966 2967 offset = offset_in_folio(folio, start) & ~(bsz - 1); 2968 2969 do { 2970 if (ops->is_partially_uptodate(folio, offset, bsz) == 2971 seek_data) 2972 break; 2973 start = (start + bsz) & ~(bsz - 1); 2974 offset += bsz; 2975 } while (offset < folio_size(folio)); 2976 unlock: 2977 folio_unlock(folio); 2978 rcu_read_lock(); 2979 return start; 2980 } 2981 2982 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) 2983 { 2984 if (xa_is_value(folio)) 2985 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); 2986 return folio_size(folio); 2987 } 2988 2989 /** 2990 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache. 2991 * @mapping: Address space to search. 2992 * @start: First byte to consider. 2993 * @end: Limit of search (exclusive). 2994 * @whence: Either SEEK_HOLE or SEEK_DATA. 2995 * 2996 * If the page cache knows which blocks contain holes and which blocks 2997 * contain data, your filesystem can use this function to implement 2998 * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are 2999 * entirely memory-based such as tmpfs, and filesystems which support 3000 * unwritten extents. 3001 * 3002 * Return: The requested offset on success, or -ENXIO if @whence specifies 3003 * SEEK_DATA and there is no data after @start. There is an implicit hole 3004 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start 3005 * and @end contain data. 
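 *
 * A simplified sketch of an ->llseek() handler using it (hypothetical;
 * a real implementation also range-checks @offset and falls back to
 * generic_file_llseek() for the other whence values):
 *
 *	static loff_t foo_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct address_space *mapping = file->f_mapping;
 *		loff_t isize = i_size_read(mapping->host);
 *
 *		if (whence == SEEK_DATA || whence == SEEK_HOLE)
 *			offset = mapping_seek_hole_data(mapping, offset,
 *							isize, whence);
 *		return offset;
 *	}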
3006 */ 3007 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, 3008 loff_t end, int whence) 3009 { 3010 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); 3011 pgoff_t max = (end - 1) >> PAGE_SHIFT; 3012 bool seek_data = (whence == SEEK_DATA); 3013 struct folio *folio; 3014 3015 if (end <= start) 3016 return -ENXIO; 3017 3018 rcu_read_lock(); 3019 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { 3020 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; 3021 size_t seek_size; 3022 3023 if (start < pos) { 3024 if (!seek_data) 3025 goto unlock; 3026 start = pos; 3027 } 3028 3029 seek_size = seek_folio_size(&xas, folio); 3030 pos = round_up((u64)pos + 1, seek_size); 3031 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, 3032 seek_data); 3033 if (start < pos) 3034 goto unlock; 3035 if (start >= end) 3036 break; 3037 if (seek_size > PAGE_SIZE) 3038 xas_set(&xas, pos >> PAGE_SHIFT); 3039 if (!xa_is_value(folio)) 3040 folio_put(folio); 3041 } 3042 if (seek_data) 3043 start = -ENXIO; 3044 unlock: 3045 rcu_read_unlock(); 3046 if (folio && !xa_is_value(folio)) 3047 folio_put(folio); 3048 if (start > end) 3049 return end; 3050 return start; 3051 } 3052 3053 #ifdef CONFIG_MMU 3054 #define MMAP_LOTSAMISS (100) 3055 /* 3056 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock 3057 * @vmf - the vm_fault for this fault. 3058 * @folio - the folio to lock. 3059 * @fpin - the pointer to the file we may pin (or is already pinned). 3060 * 3061 * This works similar to lock_folio_or_retry in that it can drop the 3062 * mmap_lock. It differs in that it actually returns the folio locked 3063 * if it returns 1 and 0 if it couldn't lock the folio. If we did have 3064 * to drop the mmap_lock then fpin will point to the pinned file and 3065 * needs to be fput()'ed at a later point. 3066 */ 3067 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, 3068 struct file **fpin) 3069 { 3070 if (folio_trylock(folio)) 3071 return 1; 3072 3073 /* 3074 * NOTE! This will make us return with VM_FAULT_RETRY, but with 3075 * the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT 3076 * is supposed to work. We have way too many special cases.. 3077 */ 3078 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 3079 return 0; 3080 3081 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); 3082 if (vmf->flags & FAULT_FLAG_KILLABLE) { 3083 if (__folio_lock_killable(folio)) { 3084 /* 3085 * We didn't have the right flags to drop the 3086 * fault lock, but all fault_handlers only check 3087 * for fatal signals if we return VM_FAULT_RETRY, 3088 * so we need to drop the fault lock here and 3089 * return 0 if we don't have a fpin. 3090 */ 3091 if (*fpin == NULL) 3092 release_fault_lock(vmf); 3093 return 0; 3094 } 3095 } else 3096 __folio_lock(folio); 3097 3098 return 1; 3099 } 3100 3101 /* 3102 * Synchronous readahead happens when we don't even find a page in the page 3103 * cache at all. We don't want to perform IO under the mmap sem, so if we have 3104 * to drop the mmap sem we return the file that was pinned in order for us to do 3105 * that. If we didn't pin a file then we return NULL. The file that is 3106 * returned needs to be fput()'ed when we're done with it. 
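 *
 * The calling pattern, as used by filemap_fault() below, is roughly:
 *
 *	fpin = do_sync_mmap_readahead(vmf);
 *	// ... find and lock the folio, possibly dropping the mmap_lock ...
 *	if (fpin)
 *		fput(fpin);	// and return VM_FAULT_RETRY to the caller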
3107 */ 3108 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) 3109 { 3110 struct file *file = vmf->vma->vm_file; 3111 struct file_ra_state *ra = &file->f_ra; 3112 struct address_space *mapping = file->f_mapping; 3113 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); 3114 struct file *fpin = NULL; 3115 unsigned long vm_flags = vmf->vma->vm_flags; 3116 unsigned int mmap_miss; 3117 3118 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3119 /* Use the readahead code, even if readahead is disabled */ 3120 if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) { 3121 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3122 ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); 3123 ra->size = HPAGE_PMD_NR; 3124 /* 3125 * Fetch two PMD folios, so we get the chance to actually 3126 * readahead, unless we've been told not to. 3127 */ 3128 if (!(vm_flags & VM_RAND_READ)) 3129 ra->size *= 2; 3130 ra->async_size = HPAGE_PMD_NR; 3131 page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER); 3132 return fpin; 3133 } 3134 #endif 3135 3136 /* If we don't want any read-ahead, don't bother */ 3137 if (vm_flags & VM_RAND_READ) 3138 return fpin; 3139 if (!ra->ra_pages) 3140 return fpin; 3141 3142 if (vm_flags & VM_SEQ_READ) { 3143 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3144 page_cache_sync_ra(&ractl, ra->ra_pages); 3145 return fpin; 3146 } 3147 3148 /* Avoid banging the cache line if not needed */ 3149 mmap_miss = READ_ONCE(ra->mmap_miss); 3150 if (mmap_miss < MMAP_LOTSAMISS * 10) 3151 WRITE_ONCE(ra->mmap_miss, ++mmap_miss); 3152 3153 /* 3154 * Do we miss much more than hit in this file? If so, 3155 * stop bothering with read-ahead. It will only hurt. 3156 */ 3157 if (mmap_miss > MMAP_LOTSAMISS) 3158 return fpin; 3159 3160 /* 3161 * mmap read-around 3162 */ 3163 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3164 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); 3165 ra->size = ra->ra_pages; 3166 ra->async_size = ra->ra_pages / 4; 3167 ractl._index = ra->start; 3168 page_cache_ra_order(&ractl, ra, 0); 3169 return fpin; 3170 } 3171 3172 /* 3173 * Asynchronous readahead happens when we find the page and PG_readahead, 3174 * so we want to possibly extend the readahead further. We return the file that 3175 * was pinned if we have to drop the mmap_lock in order to do IO. 3176 */ 3177 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, 3178 struct folio *folio) 3179 { 3180 struct file *file = vmf->vma->vm_file; 3181 struct file_ra_state *ra = &file->f_ra; 3182 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); 3183 struct file *fpin = NULL; 3184 unsigned int mmap_miss; 3185 3186 /* If we don't want any read-ahead, don't bother */ 3187 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) 3188 return fpin; 3189 3190 mmap_miss = READ_ONCE(ra->mmap_miss); 3191 if (mmap_miss) 3192 WRITE_ONCE(ra->mmap_miss, --mmap_miss); 3193 3194 if (folio_test_readahead(folio)) { 3195 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3196 page_cache_async_ra(&ractl, folio, ra->ra_pages); 3197 } 3198 return fpin; 3199 } 3200 3201 static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf) 3202 { 3203 struct vm_area_struct *vma = vmf->vma; 3204 vm_fault_t ret = 0; 3205 pte_t *ptep; 3206 3207 /* 3208 * We might have COW'ed a pagecache folio and might now have an mlocked 3209 * anon folio mapped. The original pagecache folio is not mlocked and 3210 * might have been evicted. 
During a read+clear/modify/write update of 3211 * the PTE, such as done in do_numa_page()/change_pte_range(), we 3212 * temporarily clear the PTE under PT lock and might detect it here as 3213 * "none" when not holding the PT lock. 3214 * 3215 * Not rechecking the PTE under PT lock could result in an unexpected 3216 * major fault in an mlock'ed region. Recheck only for this special 3217 * scenario while holding the PT lock, to not degrade non-mlocked 3218 * scenarios. Recheck the PTE without PT lock firstly, thereby reducing 3219 * the number of times we hold PT lock. 3220 */ 3221 if (!(vma->vm_flags & VM_LOCKED)) 3222 return 0; 3223 3224 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) 3225 return 0; 3226 3227 ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address, 3228 &vmf->ptl); 3229 if (unlikely(!ptep)) 3230 return VM_FAULT_NOPAGE; 3231 3232 if (unlikely(!pte_none(ptep_get_lockless(ptep)))) { 3233 ret = VM_FAULT_NOPAGE; 3234 } else { 3235 spin_lock(vmf->ptl); 3236 if (unlikely(!pte_none(ptep_get(ptep)))) 3237 ret = VM_FAULT_NOPAGE; 3238 spin_unlock(vmf->ptl); 3239 } 3240 pte_unmap(ptep); 3241 return ret; 3242 } 3243 3244 /** 3245 * filemap_fault - read in file data for page fault handling 3246 * @vmf: struct vm_fault containing details of the fault 3247 * 3248 * filemap_fault() is invoked via the vma operations vector for a 3249 * mapped memory region to read in file data during a page fault. 3250 * 3251 * The goto's are kind of ugly, but this streamlines the normal case of having 3252 * it in the page cache, and handles the special cases reasonably without 3253 * having a lot of duplicated code. 3254 * 3255 * vma->vm_mm->mmap_lock must be held on entry. 3256 * 3257 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock 3258 * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap(). 3259 * 3260 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock 3261 * has not been released. 3262 * 3263 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 3264 * 3265 * Return: bitwise-OR of %VM_FAULT_ codes. 3266 */ 3267 vm_fault_t filemap_fault(struct vm_fault *vmf) 3268 { 3269 int error; 3270 struct file *file = vmf->vma->vm_file; 3271 struct file *fpin = NULL; 3272 struct address_space *mapping = file->f_mapping; 3273 struct inode *inode = mapping->host; 3274 pgoff_t max_idx, index = vmf->pgoff; 3275 struct folio *folio; 3276 vm_fault_t ret = 0; 3277 bool mapping_locked = false; 3278 3279 max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3280 if (unlikely(index >= max_idx)) 3281 return VM_FAULT_SIGBUS; 3282 3283 trace_mm_filemap_fault(mapping, index); 3284 3285 /* 3286 * Do we have something in the page cache already? 3287 */ 3288 folio = filemap_get_folio(mapping, index); 3289 if (likely(!IS_ERR(folio))) { 3290 /* 3291 * We found the page, so try async readahead before waiting for 3292 * the lock. 
3293 */ 3294 if (!(vmf->flags & FAULT_FLAG_TRIED)) 3295 fpin = do_async_mmap_readahead(vmf, folio); 3296 if (unlikely(!folio_test_uptodate(folio))) { 3297 filemap_invalidate_lock_shared(mapping); 3298 mapping_locked = true; 3299 } 3300 } else { 3301 ret = filemap_fault_recheck_pte_none(vmf); 3302 if (unlikely(ret)) 3303 return ret; 3304 3305 /* No page in the page cache at all */ 3306 count_vm_event(PGMAJFAULT); 3307 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 3308 ret = VM_FAULT_MAJOR; 3309 fpin = do_sync_mmap_readahead(vmf); 3310 retry_find: 3311 /* 3312 * See comment in filemap_create_folio() why we need 3313 * invalidate_lock 3314 */ 3315 if (!mapping_locked) { 3316 filemap_invalidate_lock_shared(mapping); 3317 mapping_locked = true; 3318 } 3319 folio = __filemap_get_folio(mapping, index, 3320 FGP_CREAT|FGP_FOR_MMAP, 3321 vmf->gfp_mask); 3322 if (IS_ERR(folio)) { 3323 if (fpin) 3324 goto out_retry; 3325 filemap_invalidate_unlock_shared(mapping); 3326 return VM_FAULT_OOM; 3327 } 3328 } 3329 3330 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) 3331 goto out_retry; 3332 3333 /* Did it get truncated? */ 3334 if (unlikely(folio->mapping != mapping)) { 3335 folio_unlock(folio); 3336 folio_put(folio); 3337 goto retry_find; 3338 } 3339 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); 3340 3341 /* 3342 * We have a locked folio in the page cache, now we need to check 3343 * that it's up-to-date. If not, it is going to be due to an error, 3344 * or because readahead was otherwise unable to retrieve it. 3345 */ 3346 if (unlikely(!folio_test_uptodate(folio))) { 3347 /* 3348 * If the invalidate lock is not held, the folio was in cache 3349 * and uptodate and now it is not. Strange but possible since we 3350 * didn't hold the page lock all the time. Let's drop 3351 * everything, get the invalidate lock and try again. 3352 */ 3353 if (!mapping_locked) { 3354 folio_unlock(folio); 3355 folio_put(folio); 3356 goto retry_find; 3357 } 3358 3359 /* 3360 * OK, the folio is really not uptodate. This can be because the 3361 * VMA has the VM_RAND_READ flag set, or because an error 3362 * arose. Let's read it in directly. 3363 */ 3364 goto page_not_uptodate; 3365 } 3366 3367 /* 3368 * We've made it this far and we had to drop our mmap_lock, now is the 3369 * time to return to the upper layer and have it re-find the vma and 3370 * redo the fault. 3371 */ 3372 if (fpin) { 3373 folio_unlock(folio); 3374 goto out_retry; 3375 } 3376 if (mapping_locked) 3377 filemap_invalidate_unlock_shared(mapping); 3378 3379 /* 3380 * Found the page and have a reference on it. 3381 * We must recheck i_size under page lock. 3382 */ 3383 max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3384 if (unlikely(index >= max_idx)) { 3385 folio_unlock(folio); 3386 folio_put(folio); 3387 return VM_FAULT_SIGBUS; 3388 } 3389 3390 vmf->page = folio_file_page(folio, index); 3391 return ret | VM_FAULT_LOCKED; 3392 3393 page_not_uptodate: 3394 /* 3395 * Umm, take care of errors if the page isn't up-to-date. 3396 * Try to re-read it _once_. We do this synchronously, 3397 * because there really aren't any performance issues here 3398 * and we need to check for errors. 
3399 */ 3400 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3401 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); 3402 if (fpin) 3403 goto out_retry; 3404 folio_put(folio); 3405 3406 if (!error || error == AOP_TRUNCATED_PAGE) 3407 goto retry_find; 3408 filemap_invalidate_unlock_shared(mapping); 3409 3410 return VM_FAULT_SIGBUS; 3411 3412 out_retry: 3413 /* 3414 * We dropped the mmap_lock, we need to return to the fault handler to 3415 * re-find the vma and come back and find our hopefully still populated 3416 * page. 3417 */ 3418 if (!IS_ERR(folio)) 3419 folio_put(folio); 3420 if (mapping_locked) 3421 filemap_invalidate_unlock_shared(mapping); 3422 if (fpin) 3423 fput(fpin); 3424 return ret | VM_FAULT_RETRY; 3425 } 3426 EXPORT_SYMBOL(filemap_fault); 3427 3428 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, 3429 pgoff_t start) 3430 { 3431 struct mm_struct *mm = vmf->vma->vm_mm; 3432 3433 /* Huge page is mapped? No need to proceed. */ 3434 if (pmd_trans_huge(*vmf->pmd)) { 3435 folio_unlock(folio); 3436 folio_put(folio); 3437 return true; 3438 } 3439 3440 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { 3441 struct page *page = folio_file_page(folio, start); 3442 vm_fault_t ret = do_set_pmd(vmf, page); 3443 if (!ret) { 3444 /* The page is mapped successfully, reference consumed. */ 3445 folio_unlock(folio); 3446 return true; 3447 } 3448 } 3449 3450 if (pmd_none(*vmf->pmd) && vmf->prealloc_pte) 3451 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); 3452 3453 return false; 3454 } 3455 3456 static struct folio *next_uptodate_folio(struct xa_state *xas, 3457 struct address_space *mapping, pgoff_t end_pgoff) 3458 { 3459 struct folio *folio = xas_next_entry(xas, end_pgoff); 3460 unsigned long max_idx; 3461 3462 do { 3463 if (!folio) 3464 return NULL; 3465 if (xas_retry(xas, folio)) 3466 continue; 3467 if (xa_is_value(folio)) 3468 continue; 3469 if (folio_test_locked(folio)) 3470 continue; 3471 if (!folio_try_get(folio)) 3472 continue; 3473 /* Has the page moved or been split? */ 3474 if (unlikely(folio != xas_reload(xas))) 3475 goto skip; 3476 if (!folio_test_uptodate(folio) || folio_test_readahead(folio)) 3477 goto skip; 3478 if (!folio_trylock(folio)) 3479 goto skip; 3480 if (folio->mapping != mapping) 3481 goto unlock; 3482 if (!folio_test_uptodate(folio)) 3483 goto unlock; 3484 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 3485 if (xas->xa_index >= max_idx) 3486 goto unlock; 3487 return folio; 3488 unlock: 3489 folio_unlock(folio); 3490 skip: 3491 folio_put(folio); 3492 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); 3493 3494 return NULL; 3495 } 3496 3497 /* 3498 * Map page range [start_page, start_page + nr_pages) of folio. 3499 * start_page is gotten from start by folio_page(folio, start) 3500 */ 3501 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, 3502 struct folio *folio, unsigned long start, 3503 unsigned long addr, unsigned int nr_pages, 3504 unsigned long *rss, unsigned int *mmap_miss) 3505 { 3506 vm_fault_t ret = 0; 3507 struct page *page = folio_page(folio, start); 3508 unsigned int count = 0; 3509 pte_t *old_ptep = vmf->pte; 3510 3511 do { 3512 if (PageHWPoison(page + count)) 3513 goto skip; 3514 3515 /* 3516 * If there are too many folios that are recently evicted 3517 * in a file, they will probably continue to be evicted. 3518 * In such situation, read-ahead is only a waste of IO. 3519 * Don't decrease mmap_miss in this scenario to make sure 3520 * we can stop read-ahead. 
3521 */ 3522 if (!folio_test_workingset(folio)) 3523 (*mmap_miss)++; 3524 3525 /* 3526 * NOTE: If there're PTE markers, we'll leave them to be 3527 * handled in the specific fault path, and it'll prohibit the 3528 * fault-around logic. 3529 */ 3530 if (!pte_none(ptep_get(&vmf->pte[count]))) 3531 goto skip; 3532 3533 count++; 3534 continue; 3535 skip: 3536 if (count) { 3537 set_pte_range(vmf, folio, page, count, addr); 3538 *rss += count; 3539 folio_ref_add(folio, count); 3540 if (in_range(vmf->address, addr, count * PAGE_SIZE)) 3541 ret = VM_FAULT_NOPAGE; 3542 } 3543 3544 count++; 3545 page += count; 3546 vmf->pte += count; 3547 addr += count * PAGE_SIZE; 3548 count = 0; 3549 } while (--nr_pages > 0); 3550 3551 if (count) { 3552 set_pte_range(vmf, folio, page, count, addr); 3553 *rss += count; 3554 folio_ref_add(folio, count); 3555 if (in_range(vmf->address, addr, count * PAGE_SIZE)) 3556 ret = VM_FAULT_NOPAGE; 3557 } 3558 3559 vmf->pte = old_ptep; 3560 3561 return ret; 3562 } 3563 3564 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, 3565 struct folio *folio, unsigned long addr, 3566 unsigned long *rss, unsigned int *mmap_miss) 3567 { 3568 vm_fault_t ret = 0; 3569 struct page *page = &folio->page; 3570 3571 if (PageHWPoison(page)) 3572 return ret; 3573 3574 /* See comment of filemap_map_folio_range() */ 3575 if (!folio_test_workingset(folio)) 3576 (*mmap_miss)++; 3577 3578 /* 3579 * NOTE: If there're PTE markers, we'll leave them to be 3580 * handled in the specific fault path, and it'll prohibit 3581 * the fault-around logic. 3582 */ 3583 if (!pte_none(ptep_get(vmf->pte))) 3584 return ret; 3585 3586 if (vmf->address == addr) 3587 ret = VM_FAULT_NOPAGE; 3588 3589 set_pte_range(vmf, folio, page, 1, addr); 3590 (*rss)++; 3591 folio_ref_inc(folio); 3592 3593 return ret; 3594 } 3595 3596 vm_fault_t filemap_map_pages(struct vm_fault *vmf, 3597 pgoff_t start_pgoff, pgoff_t end_pgoff) 3598 { 3599 struct vm_area_struct *vma = vmf->vma; 3600 struct file *file = vma->vm_file; 3601 struct address_space *mapping = file->f_mapping; 3602 pgoff_t last_pgoff = start_pgoff; 3603 unsigned long addr; 3604 XA_STATE(xas, &mapping->i_pages, start_pgoff); 3605 struct folio *folio; 3606 vm_fault_t ret = 0; 3607 unsigned long rss = 0; 3608 unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type; 3609 3610 rcu_read_lock(); 3611 folio = next_uptodate_folio(&xas, mapping, end_pgoff); 3612 if (!folio) 3613 goto out; 3614 3615 if (filemap_map_pmd(vmf, folio, start_pgoff)) { 3616 ret = VM_FAULT_NOPAGE; 3617 goto out; 3618 } 3619 3620 addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); 3621 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); 3622 if (!vmf->pte) { 3623 folio_unlock(folio); 3624 folio_put(folio); 3625 goto out; 3626 } 3627 3628 folio_type = mm_counter_file(folio); 3629 do { 3630 unsigned long end; 3631 3632 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; 3633 vmf->pte += xas.xa_index - last_pgoff; 3634 last_pgoff = xas.xa_index; 3635 end = folio_next_index(folio) - 1; 3636 nr_pages = min(end, end_pgoff) - xas.xa_index + 1; 3637 3638 if (!folio_test_large(folio)) 3639 ret |= filemap_map_order0_folio(vmf, 3640 folio, addr, &rss, &mmap_miss); 3641 else 3642 ret |= filemap_map_folio_range(vmf, folio, 3643 xas.xa_index - folio->index, addr, 3644 nr_pages, &rss, &mmap_miss); 3645 3646 folio_unlock(folio); 3647 folio_put(folio); 3648 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); 3649 add_mm_counter(vma->vm_mm, 
folio_type, rss); 3650 pte_unmap_unlock(vmf->pte, vmf->ptl); 3651 trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff); 3652 out: 3653 rcu_read_unlock(); 3654 3655 mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss); 3656 if (mmap_miss >= mmap_miss_saved) 3657 WRITE_ONCE(file->f_ra.mmap_miss, 0); 3658 else 3659 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss); 3660 3661 return ret; 3662 } 3663 EXPORT_SYMBOL(filemap_map_pages); 3664 3665 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 3666 { 3667 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 3668 struct folio *folio = page_folio(vmf->page); 3669 vm_fault_t ret = VM_FAULT_LOCKED; 3670 3671 sb_start_pagefault(mapping->host->i_sb); 3672 file_update_time(vmf->vma->vm_file); 3673 folio_lock(folio); 3674 if (folio->mapping != mapping) { 3675 folio_unlock(folio); 3676 ret = VM_FAULT_NOPAGE; 3677 goto out; 3678 } 3679 /* 3680 * We mark the folio dirty already here so that when freeze is in 3681 * progress, we are guaranteed that writeback during freezing will 3682 * see the dirty folio and writeprotect it again. 3683 */ 3684 folio_mark_dirty(folio); 3685 folio_wait_stable(folio); 3686 out: 3687 sb_end_pagefault(mapping->host->i_sb); 3688 return ret; 3689 } 3690 3691 const struct vm_operations_struct generic_file_vm_ops = { 3692 .fault = filemap_fault, 3693 .map_pages = filemap_map_pages, 3694 .page_mkwrite = filemap_page_mkwrite, 3695 }; 3696 3697 /* This is used for a general mmap of a disk file */ 3698 3699 int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3700 { 3701 struct address_space *mapping = file->f_mapping; 3702 3703 if (!mapping->a_ops->read_folio) 3704 return -ENOEXEC; 3705 file_accessed(file); 3706 vma->vm_ops = &generic_file_vm_ops; 3707 return 0; 3708 } 3709 3710 /* 3711 * This is for filesystems which do not implement ->writepage. 
3712 */ 3713 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 3714 { 3715 if (vma_is_shared_maywrite(vma)) 3716 return -EINVAL; 3717 return generic_file_mmap(file, vma); 3718 } 3719 #else 3720 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 3721 { 3722 return VM_FAULT_SIGBUS; 3723 } 3724 int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3725 { 3726 return -ENOSYS; 3727 } 3728 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 3729 { 3730 return -ENOSYS; 3731 } 3732 #endif /* CONFIG_MMU */ 3733 3734 EXPORT_SYMBOL(filemap_page_mkwrite); 3735 EXPORT_SYMBOL(generic_file_mmap); 3736 EXPORT_SYMBOL(generic_file_readonly_mmap); 3737 3738 static struct folio *do_read_cache_folio(struct address_space *mapping, 3739 pgoff_t index, filler_t filler, struct file *file, gfp_t gfp) 3740 { 3741 struct folio *folio; 3742 int err; 3743 3744 if (!filler) 3745 filler = mapping->a_ops->read_folio; 3746 repeat: 3747 folio = filemap_get_folio(mapping, index); 3748 if (IS_ERR(folio)) { 3749 folio = filemap_alloc_folio(gfp, 0); 3750 if (!folio) 3751 return ERR_PTR(-ENOMEM); 3752 err = filemap_add_folio(mapping, folio, index, gfp); 3753 if (unlikely(err)) { 3754 folio_put(folio); 3755 if (err == -EEXIST) 3756 goto repeat; 3757 /* Presumably ENOMEM for xarray node */ 3758 return ERR_PTR(err); 3759 } 3760 3761 goto filler; 3762 } 3763 if (folio_test_uptodate(folio)) 3764 goto out; 3765 3766 if (!folio_trylock(folio)) { 3767 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE); 3768 goto repeat; 3769 } 3770 3771 /* Folio was truncated from mapping */ 3772 if (!folio->mapping) { 3773 folio_unlock(folio); 3774 folio_put(folio); 3775 goto repeat; 3776 } 3777 3778 /* Someone else locked and filled the page in a very small window */ 3779 if (folio_test_uptodate(folio)) { 3780 folio_unlock(folio); 3781 goto out; 3782 } 3783 3784 filler: 3785 err = filemap_read_folio(file, filler, folio); 3786 if (err) { 3787 folio_put(folio); 3788 if (err == AOP_TRUNCATED_PAGE) 3789 goto repeat; 3790 return ERR_PTR(err); 3791 } 3792 3793 out: 3794 folio_mark_accessed(folio); 3795 return folio; 3796 } 3797 3798 /** 3799 * read_cache_folio - Read into page cache, fill it if needed. 3800 * @mapping: The address_space to read from. 3801 * @index: The index to read. 3802 * @filler: Function to perform the read, or NULL to use aops->read_folio(). 3803 * @file: Passed to filler function, may be NULL if not required. 3804 * 3805 * Read one page into the page cache. If it succeeds, the folio returned 3806 * will contain @index, but it may not be the first page of the folio. 3807 * 3808 * If the filler function returns an error, it will be returned to the 3809 * caller. 3810 * 3811 * Context: May sleep. Expects mapping->invalidate_lock to be held. 3812 * Return: An uptodate folio on success, ERR_PTR() on failure. 3813 */ 3814 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, 3815 filler_t filler, struct file *file) 3816 { 3817 return do_read_cache_folio(mapping, index, filler, file, 3818 mapping_gfp_mask(mapping)); 3819 } 3820 EXPORT_SYMBOL(read_cache_folio); 3821 3822 /** 3823 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags. 3824 * @mapping: The address_space for the folio. 3825 * @index: The index that the allocated folio will contain. 3826 * @gfp: The page allocator flags to use if allocating. 
3827 * 3828 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with 3829 * any new memory allocations done using the specified allocation flags. 3830 * 3831 * The most likely error from this function is EIO, but ENOMEM is 3832 * possible and so is EINTR. If ->read_folio returns another error, 3833 * that will be returned to the caller. 3834 * 3835 * The function expects mapping->invalidate_lock to be already held. 3836 * 3837 * Return: Uptodate folio on success, ERR_PTR() on failure. 3838 */ 3839 struct folio *mapping_read_folio_gfp(struct address_space *mapping, 3840 pgoff_t index, gfp_t gfp) 3841 { 3842 return do_read_cache_folio(mapping, index, NULL, NULL, gfp); 3843 } 3844 EXPORT_SYMBOL(mapping_read_folio_gfp); 3845 3846 static struct page *do_read_cache_page(struct address_space *mapping, 3847 pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp) 3848 { 3849 struct folio *folio; 3850 3851 folio = do_read_cache_folio(mapping, index, filler, file, gfp); 3852 if (IS_ERR(folio)) 3853 return &folio->page; 3854 return folio_file_page(folio, index); 3855 } 3856 3857 struct page *read_cache_page(struct address_space *mapping, 3858 pgoff_t index, filler_t *filler, struct file *file) 3859 { 3860 return do_read_cache_page(mapping, index, filler, file, 3861 mapping_gfp_mask(mapping)); 3862 } 3863 EXPORT_SYMBOL(read_cache_page); 3864 3865 /** 3866 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 3867 * @mapping: the page's address_space 3868 * @index: the page index 3869 * @gfp: the page allocator flags to use if allocating 3870 * 3871 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 3872 * any new page allocations done using the specified allocation flags. 3873 * 3874 * If the page does not get brought uptodate, return -EIO. 3875 * 3876 * The function expects mapping->invalidate_lock to be already held. 3877 * 3878 * Return: up to date page on success, ERR_PTR() on failure. 3879 */ 3880 struct page *read_cache_page_gfp(struct address_space *mapping, 3881 pgoff_t index, 3882 gfp_t gfp) 3883 { 3884 return do_read_cache_page(mapping, index, NULL, NULL, gfp); 3885 } 3886 EXPORT_SYMBOL(read_cache_page_gfp); 3887 3888 /* 3889 * Warn about a page cache invalidation failure during a direct I/O write. 3890 */ 3891 static void dio_warn_stale_pagecache(struct file *filp) 3892 { 3893 static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); 3894 char pathname[128]; 3895 char *path; 3896 3897 errseq_set(&filp->f_mapping->wb_err, -EIO); 3898 if (__ratelimit(&_rs)) { 3899 path = file_path(filp, pathname, sizeof(pathname)); 3900 if (IS_ERR(path)) 3901 path = "(unknown)"; 3902 pr_crit("Page cache invalidation failure on direct I/O. 
Possible data corruption due to collision with buffered I/O!\n"); 3903 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, 3904 current->comm); 3905 } 3906 } 3907 3908 void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count) 3909 { 3910 struct address_space *mapping = iocb->ki_filp->f_mapping; 3911 3912 if (mapping->nrpages && 3913 invalidate_inode_pages2_range(mapping, 3914 iocb->ki_pos >> PAGE_SHIFT, 3915 (iocb->ki_pos + count - 1) >> PAGE_SHIFT)) 3916 dio_warn_stale_pagecache(iocb->ki_filp); 3917 } 3918 3919 ssize_t 3920 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) 3921 { 3922 struct address_space *mapping = iocb->ki_filp->f_mapping; 3923 size_t write_len = iov_iter_count(from); 3924 ssize_t written; 3925 3926 /* 3927 * If a page can not be invalidated, return 0 to fall back 3928 * to buffered write. 3929 */ 3930 written = kiocb_invalidate_pages(iocb, write_len); 3931 if (written) { 3932 if (written == -EBUSY) 3933 return 0; 3934 return written; 3935 } 3936 3937 written = mapping->a_ops->direct_IO(iocb, from); 3938 3939 /* 3940 * Finally, try again to invalidate clean pages which might have been 3941 * cached by non-direct readahead, or faulted in by get_user_pages() 3942 * if the source of the write was an mmap'ed region of the file 3943 * we're writing. Either one is a pretty crazy thing to do, 3944 * so we don't support it 100%. If this invalidation 3945 * fails, tough, the write still worked... 3946 * 3947 * Most of the time we do not need this since dio_complete() will do 3948 * the invalidation for us. However there are some file systems that 3949 * do not end up with dio_complete() being called, so let's not break 3950 * them by removing it completely. 3951 * 3952 * Noticeable example is a blkdev_direct_IO(). 3953 * 3954 * Skip invalidation for async writes or if mapping has no pages. 3955 */ 3956 if (written > 0) { 3957 struct inode *inode = mapping->host; 3958 loff_t pos = iocb->ki_pos; 3959 3960 kiocb_invalidate_post_direct_write(iocb, written); 3961 pos += written; 3962 write_len -= written; 3963 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 3964 i_size_write(inode, pos); 3965 mark_inode_dirty(inode); 3966 } 3967 iocb->ki_pos = pos; 3968 } 3969 if (written != -EIOCBQUEUED) 3970 iov_iter_revert(from, write_len - iov_iter_count(from)); 3971 return written; 3972 } 3973 EXPORT_SYMBOL(generic_file_direct_write); 3974 3975 ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) 3976 { 3977 struct file *file = iocb->ki_filp; 3978 loff_t pos = iocb->ki_pos; 3979 struct address_space *mapping = file->f_mapping; 3980 const struct address_space_operations *a_ops = mapping->a_ops; 3981 size_t chunk = mapping_max_folio_size(mapping); 3982 long status = 0; 3983 ssize_t written = 0; 3984 3985 do { 3986 struct page *page; 3987 struct folio *folio; 3988 size_t offset; /* Offset into folio */ 3989 size_t bytes; /* Bytes to write to folio */ 3990 size_t copied; /* Bytes copied from user */ 3991 void *fsdata = NULL; 3992 3993 bytes = iov_iter_count(i); 3994 retry: 3995 offset = pos & (chunk - 1); 3996 bytes = min(chunk - offset, bytes); 3997 balance_dirty_pages_ratelimited(mapping); 3998 3999 /* 4000 * Bring in the user page that we will copy from _first_. 4001 * Otherwise there's a nasty deadlock on copying from the 4002 * same page as we're writing to, without it being marked 4003 * up-to-date. 
4004 */ 4005 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { 4006 status = -EFAULT; 4007 break; 4008 } 4009 4010 if (fatal_signal_pending(current)) { 4011 status = -EINTR; 4012 break; 4013 } 4014 4015 status = a_ops->write_begin(file, mapping, pos, bytes, 4016 &page, &fsdata); 4017 if (unlikely(status < 0)) 4018 break; 4019 4020 folio = page_folio(page); 4021 offset = offset_in_folio(folio, pos); 4022 if (bytes > folio_size(folio) - offset) 4023 bytes = folio_size(folio) - offset; 4024 4025 if (mapping_writably_mapped(mapping)) 4026 flush_dcache_folio(folio); 4027 4028 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i); 4029 flush_dcache_folio(folio); 4030 4031 status = a_ops->write_end(file, mapping, pos, bytes, copied, 4032 page, fsdata); 4033 if (unlikely(status != copied)) { 4034 iov_iter_revert(i, copied - max(status, 0L)); 4035 if (unlikely(status < 0)) 4036 break; 4037 } 4038 cond_resched(); 4039 4040 if (unlikely(status == 0)) { 4041 /* 4042 * A short copy made ->write_end() reject the 4043 * thing entirely. Might be memory poisoning 4044 * halfway through, might be a race with munmap, 4045 * might be severe memory pressure. 4046 */ 4047 if (chunk > PAGE_SIZE) 4048 chunk /= 2; 4049 if (copied) { 4050 bytes = copied; 4051 goto retry; 4052 } 4053 } else { 4054 pos += status; 4055 written += status; 4056 } 4057 } while (iov_iter_count(i)); 4058 4059 if (!written) 4060 return status; 4061 iocb->ki_pos += written; 4062 return written; 4063 } 4064 EXPORT_SYMBOL(generic_perform_write); 4065 4066 /** 4067 * __generic_file_write_iter - write data to a file 4068 * @iocb: IO state structure (file, offset, etc.) 4069 * @from: iov_iter with data to write 4070 * 4071 * This function does all the work needed for actually writing data to a 4072 * file. It does all basic checks, removes SUID from the file, updates 4073 * modification times and calls proper subroutines depending on whether we 4074 * do direct IO or a standard buffered write. 4075 * 4076 * It expects i_rwsem to be grabbed unless we work on a block device or similar 4077 * object which does not need locking at all. 4078 * 4079 * This function does *not* take care of syncing data in case of O_SYNC write. 4080 * A caller has to handle it. This is mainly due to the fact that we want to 4081 * avoid syncing under i_rwsem. 4082 * 4083 * Return: 4084 * * number of bytes written, even for truncated writes 4085 * * negative error code if no data has been written at all 4086 */ 4087 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 4088 { 4089 struct file *file = iocb->ki_filp; 4090 struct address_space *mapping = file->f_mapping; 4091 struct inode *inode = mapping->host; 4092 ssize_t ret; 4093 4094 ret = file_remove_privs(file); 4095 if (ret) 4096 return ret; 4097 4098 ret = file_update_time(file); 4099 if (ret) 4100 return ret; 4101 4102 if (iocb->ki_flags & IOCB_DIRECT) { 4103 ret = generic_file_direct_write(iocb, from); 4104 /* 4105 * If the write stopped short of completing, fall back to 4106 * buffered writes. Some filesystems do this for writes to 4107 * holes, for example. For DAX files, a buffered write will 4108 * not succeed (even if it did, DAX does not handle dirty 4109 * page-cache pages correctly). 
4110 */ 4111 if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode)) 4112 return ret; 4113 return direct_write_fallback(iocb, from, ret, 4114 generic_perform_write(iocb, from)); 4115 } 4116 4117 return generic_perform_write(iocb, from); 4118 } 4119 EXPORT_SYMBOL(__generic_file_write_iter); 4120 4121 /** 4122 * generic_file_write_iter - write data to a file 4123 * @iocb: IO state structure 4124 * @from: iov_iter with data to write 4125 * 4126 * This is a wrapper around __generic_file_write_iter() to be used by most 4127 * filesystems. It takes care of syncing the file for O_SYNC writes 4128 * and acquires i_rwsem as needed. 4129 * Return: 4130 * * negative error code if no data has been written at all or 4131 * vfs_fsync_range() failed for a synchronous write 4132 * * number of bytes written, even for truncated writes 4133 */ 4134 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 4135 { 4136 struct file *file = iocb->ki_filp; 4137 struct inode *inode = file->f_mapping->host; 4138 ssize_t ret; 4139 4140 inode_lock(inode); 4141 ret = generic_write_checks(iocb, from); 4142 if (ret > 0) 4143 ret = __generic_file_write_iter(iocb, from); 4144 inode_unlock(inode); 4145 4146 if (ret > 0) 4147 ret = generic_write_sync(iocb, ret); 4148 return ret; 4149 } 4150 EXPORT_SYMBOL(generic_file_write_iter); 4151 4152 /** 4153 * filemap_release_folio() - Release fs-specific metadata on a folio. 4154 * @folio: The folio which the kernel is trying to free. 4155 * @gfp: Memory allocation flags (and I/O mode). 4156 * 4157 * The address_space is trying to release any data attached to a folio 4158 * (presumably at folio->private). 4159 * 4160 * This will also be called if the private_2 flag is set on a page, 4161 * indicating that the folio has other metadata associated with it. 4162 * 4163 * The @gfp argument specifies whether I/O may be performed to release 4164 * this page (__GFP_IO), and whether the call may block 4165 * (__GFP_RECLAIM & __GFP_FS). 4166 * 4167 * Return: %true if the release was successful, otherwise %false. 4168 */ 4169 bool filemap_release_folio(struct folio *folio, gfp_t gfp) 4170 { 4171 struct address_space * const mapping = folio->mapping; 4172 4173 BUG_ON(!folio_test_locked(folio)); 4174 if (!folio_needs_release(folio)) 4175 return true; 4176 if (folio_test_writeback(folio)) 4177 return false; 4178 4179 if (mapping && mapping->a_ops->release_folio) 4180 return mapping->a_ops->release_folio(folio, gfp); 4181 return try_to_free_buffers(folio); 4182 } 4183 EXPORT_SYMBOL(filemap_release_folio); 4184 4185 /** 4186 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache 4187 * @inode: The inode to flush 4188 * @flush: Set to write back rather than simply invalidate. 4189 * @start: First byte in range. 4190 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start 4191 * onwards. 4192 * 4193 * Invalidate all the folios on an inode that contribute to the specified 4194 * range, possibly writing them back first. Whilst the operation is 4195 * undertaken, the invalidate lock is held to prevent new folios from being 4196 * installed. 4197 */ 4198 int filemap_invalidate_inode(struct inode *inode, bool flush, 4199 loff_t start, loff_t end) 4200 { 4201 struct address_space *mapping = inode->i_mapping; 4202 pgoff_t first = start >> PAGE_SHIFT; 4203 pgoff_t last = end >> PAGE_SHIFT; 4204 pgoff_t nr = end == LLONG_MAX ?
ULONG_MAX : last - first + 1; 4205 4206 if (!mapping || !mapping->nrpages || end < start) 4207 goto out; 4208 4209 /* Prevent new folios from being added to the inode. */ 4210 filemap_invalidate_lock(mapping); 4211 4212 if (!mapping->nrpages) 4213 goto unlock; 4214 4215 unmap_mapping_pages(mapping, first, nr, false); 4216 4217 /* Write back the data if we're asked to. */ 4218 if (flush) { 4219 struct writeback_control wbc = { 4220 .sync_mode = WB_SYNC_ALL, 4221 .nr_to_write = LONG_MAX, 4222 .range_start = start, 4223 .range_end = end, 4224 }; 4225 4226 filemap_fdatawrite_wbc(mapping, &wbc); 4227 } 4228 4229 /* Wait for writeback to complete on all folios and discard. */ 4230 truncate_inode_pages_range(mapping, start, end); 4231 4232 unlock: 4233 filemap_invalidate_unlock(mapping); 4234 out: 4235 return filemap_check_errors(mapping); 4236 } 4237 EXPORT_SYMBOL_GPL(filemap_invalidate_inode); 4238 4239 #ifdef CONFIG_CACHESTAT_SYSCALL 4240 /** 4241 * filemap_cachestat() - compute the page cache statistics of a mapping 4242 * @mapping: The mapping to compute the statistics for. 4243 * @first_index: The starting page cache index. 4244 * @last_index: The final page index (inclusive). 4245 * @cs: the cachestat struct to write the result to. 4246 * 4247 * This will query the page cache statistics of a mapping in the 4248 * page range of [first_index, last_index] (inclusive). The statistics 4249 * queried include: number of dirty pages, number of pages marked for 4250 * writeback, and the number of (recently) evicted pages. 4251 */ 4252 static void filemap_cachestat(struct address_space *mapping, 4253 pgoff_t first_index, pgoff_t last_index, struct cachestat *cs) 4254 { 4255 XA_STATE(xas, &mapping->i_pages, first_index); 4256 struct folio *folio; 4257 4258 /* Flush stats (and potentially sleep) outside the RCU read section. */ 4259 mem_cgroup_flush_stats_ratelimited(NULL); 4260 4261 rcu_read_lock(); 4262 xas_for_each(&xas, folio, last_index) { 4263 int order; 4264 unsigned long nr_pages; 4265 pgoff_t folio_first_index, folio_last_index; 4266 4267 /* 4268 * Don't deref the folio. It is not pinned, and might 4269 * get freed (and reused) underneath us. 4270 * 4271 * We *could* pin it, but that would be expensive for 4272 * what should be a fast and lightweight syscall. 4273 * 4274 * Instead, derive all information of interest from 4275 * the rcu-protected xarray. 4276 */ 4277 4278 if (xas_retry(&xas, folio)) 4279 continue; 4280 4281 order = xa_get_order(xas.xa, xas.xa_index); 4282 nr_pages = 1 << order; 4283 folio_first_index = round_down(xas.xa_index, 1 << order); 4284 folio_last_index = folio_first_index + nr_pages - 1; 4285 4286 /* Folios might straddle the range boundaries, only count covered pages */ 4287 if (folio_first_index < first_index) 4288 nr_pages -= first_index - folio_first_index; 4289 4290 if (folio_last_index > last_index) 4291 nr_pages -= folio_last_index - last_index; 4292 4293 if (xa_is_value(folio)) { 4294 /* page is evicted */ 4295 void *shadow = (void *)folio; 4296 bool workingset; /* not used */ 4297 4298 cs->nr_evicted += nr_pages; 4299 4300 #ifdef CONFIG_SWAP /* implies CONFIG_MMU */ 4301 if (shmem_mapping(mapping)) { 4302 /* shmem file - in swap cache */ 4303 swp_entry_t swp = radix_to_swp_entry(folio); 4304 4305 /* swapin error results in poisoned entry */ 4306 if (non_swap_entry(swp)) 4307 goto resched; 4308 4309 /* 4310 * Getting a swap entry from the shmem 4311 * inode means we beat 4312 * shmem_unuse(). 
rcu_read_lock() 4313 * ensures swapoff waits for us before 4314 * freeing the swapper space. However, 4315 * we can race with swapping and 4316 * invalidation, so there might not be 4317 * a shadow in the swapcache (yet). 4318 */ 4319 shadow = get_shadow_from_swap_cache(swp); 4320 if (!shadow) 4321 goto resched; 4322 } 4323 #endif 4324 if (workingset_test_recent(shadow, true, &workingset, false)) 4325 cs->nr_recently_evicted += nr_pages; 4326 4327 goto resched; 4328 } 4329 4330 /* page is in cache */ 4331 cs->nr_cache += nr_pages; 4332 4333 if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY)) 4334 cs->nr_dirty += nr_pages; 4335 4336 if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK)) 4337 cs->nr_writeback += nr_pages; 4338 4339 resched: 4340 if (need_resched()) { 4341 xas_pause(&xas); 4342 cond_resched_rcu(); 4343 } 4344 } 4345 rcu_read_unlock(); 4346 } 4347 4348 /* 4349 * The cachestat(2) system call. 4350 * 4351 * cachestat() returns the page cache statistics of a file in the 4352 * bytes range specified by `off` and `len`: number of cached pages, 4353 * number of dirty pages, number of pages marked for writeback, 4354 * number of evicted pages, and number of recently evicted pages. 4355 * 4356 * An evicted page is a page that was previously in the page cache 4357 * but has since been evicted. A page is recently evicted if its last 4358 * eviction was recent enough that its reentry to the cache would 4359 * indicate that it is actively being used by the system, and that 4360 * there is memory pressure on the system. 4361 * 4362 * `off` and `len` must be non-negative integers. If `len` > 0, 4363 * the queried range is [`off`, `off` + `len`]. If `len` == 0, 4364 * we will query in the range from `off` to the end of the file. 4365 * 4366 * The `flags` argument is unused for now, but is included for future 4367 * extensibility. Users should pass 0 (i.e. no flags specified). 4368 * 4369 * Currently, hugetlbfs is not supported. 4370 * 4371 * Because the status of a page can change after cachestat() checks it 4372 * but before it returns to the application, the returned values may 4373 * contain stale information. 4374 * 4375 * return values: 4376 * zero - success 4377 * -EFAULT - cstat or cstat_range points to an illegal address 4378 * -EINVAL - invalid flags 4379 * -EBADF - invalid file descriptor 4380 * -EOPNOTSUPP - file descriptor is of a hugetlbfs file 4381 */ 4382 SYSCALL_DEFINE4(cachestat, unsigned int, fd, 4383 struct cachestat_range __user *, cstat_range, 4384 struct cachestat __user *, cstat, unsigned int, flags) 4385 { 4386 struct fd f = fdget(fd); 4387 struct address_space *mapping; 4388 struct cachestat_range csr; 4389 struct cachestat cs; 4390 pgoff_t first_index, last_index; 4391 4392 if (!f.file) 4393 return -EBADF; 4394 4395 if (copy_from_user(&csr, cstat_range, 4396 sizeof(struct cachestat_range))) { 4397 fdput(f); 4398 return -EFAULT; 4399 } 4400 4401 /* hugetlbfs is not supported */ 4402 if (is_file_hugepages(f.file)) { 4403 fdput(f); 4404 return -EOPNOTSUPP; 4405 } 4406 4407 if (flags != 0) { 4408 fdput(f); 4409 return -EINVAL; 4410 } 4411 4412 first_index = csr.off >> PAGE_SHIFT; 4413 last_index = 4414 csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT; 4415 memset(&cs, 0, sizeof(struct cachestat)); 4416 mapping = f.file->f_mapping; 4417 filemap_cachestat(mapping, first_index, last_index, &cs); 4418 fdput(f); 4419 4420 if (copy_to_user(cstat, &cs, sizeof(struct cachestat))) 4421 return -EFAULT; 4422 4423 return 0; 4424 } 4425 #endif /* CONFIG_CACHESTAT_SYSCALL */ 4426
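To make the cachestat(2) contract documented above concrete from the caller's side, a minimal userspace sketch follows. It assumes kernel and libc headers recent enough to provide __NR_cachestat and the struct cachestat / struct cachestat_range UAPI definitions (there may be no dedicated libc wrapper, so syscall(2) is used directly), and "somefile" is a placeholder path.

/* Userspace sketch of calling cachestat(2); not part of filemap.c. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>		/* struct cachestat, struct cachestat_range */

int main(void)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* len == 0: query to EOF */
	struct cachestat cs;
	int fd = open("somefile", O_RDONLY);

	if (fd < 0)
		return 1;
	/* flags must be 0, matching the -EINVAL check in the syscall above. */
	if (syscall(__NR_cachestat, fd, &range, &cs, 0) != 0) {
		close(fd);
		return 1;
	}
	printf("cache=%llu dirty=%llu writeback=%llu evicted=%llu recently_evicted=%llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	close(fd);
	return 0;
}

Because the statistics are sampled without pinning folios, the values may already be stale by the time they reach the caller, as the kerneldoc above notes.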
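The exported helpers in this part of the file (filemap_fault, filemap_map_pages and filemap_page_mkwrite via generic_file_vm_ops, generic_file_mmap, and generic_file_write_iter) are intended to be plugged into a filesystem's file_operations. The sketch below uses assumed names: examplefs_file_operations is hypothetical, the other symbols are existing generic helpers, and the filesystem's address_space_operations must provide read_folio, since generic_file_mmap returns -ENOEXEC without it.

/* Usage sketch, not part of filemap.c: a simple pagecache-backed filesystem. */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,	/* i_rwsem + O_SYNC handling shown above */
	.mmap		= generic_file_mmap,		/* installs generic_file_vm_ops */
	.splice_read	= filemap_splice_read,
};

A filesystem that cannot write back shared mappings would use generic_file_readonly_mmap instead, which rejects shared writable mappings with -EINVAL as shown above.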