// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <linux/migrate.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/mm.h>
#include <linux/sysctl.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

#include "swap.h"

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem			(truncate_pagecache)
 *    ->private_lock			(__free_pte->block_dirty_folio)
 *      ->swap_lock			(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock			(acquired by fs in truncate path)
 *      ->i_mmap_rwsem			(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock			(filemap_fault)
 *      ->lock_page			(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem				(generic_perform_write)
 *    ->mmap_lock			(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock				(fs/fs-writeback.c)
 *    ->i_pages lock			(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock			(vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock			(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->i_pages lock			(try_to_unmap_one)
 *    ->lruvec->lru_lock		(follow_page_mask->mark_page_accessed)
 *    ->lruvec->lru_lock		(check_pte_range->folio_isolate_lru)
 *    ->private_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    ->i_pages lock			(folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    ->inode->i_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock			(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->block_dirty_folio)
 */

static void page_cache_delete(struct address_space *mapping,
				struct folio *folio, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);
	long nr = 1;

	mapping_set_update(&xas, mapping);

	xas_set_order(&xas, folio->index, folio_order(folio));
	nr = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	folio->mapping = NULL;
	/* Leave folio->index set: truncation lookup relies upon it */
	mapping->nrpages -= nr;
}

static void filemap_unaccount_folio(struct address_space *mapping,
		struct folio *folio)
{
	long nr;

	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
			int mapcount = folio_mapcount(folio);

			if (folio_ref_count(folio) >= mapcount + 2) {
				/*
				 * All vmas have already been torn down, so it's
				 * a good bet that actually the page is unmapped
				 * and we'd rather not leak it: if we're wrong,
				 * another bad page check should catch it later.
				 */
				atomic_set(&folio->_mapcount, -1);
				folio_ref_sub(folio, mapcount);
			}
		}
	}

	/* hugetlb folios do not participate in page cache accounting. */
	if (folio_test_hugetlb(folio))
		return;

	nr = folio_nr_pages(folio);

	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	if (folio_test_swapbacked(folio)) {
		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
		if (folio_test_pmd_mappable(folio))
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
	} else if (folio_test_pmd_mappable(folio)) {
		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
		filemap_nr_thps_dec(mapping);
	}

	/*
	 * At this point folio must be either written or cleaned by
	 * truncate.  Dirty folio here signals a bug and loss of
	 * unwritten data - on ordinary filesystems.
	 *
	 * But it's harmless on in-memory filesystems like tmpfs; and can
	 * occur when a driver which did get_user_pages() sets page dirty
	 * before putting it, while the inode is being finally evicted.
	 *
	 * Below fixes dirty accounting after removing the folio entirely
	 * but leaves the dirty flag set: it has no effect for truncated
	 * folio and anyway will be cleared before returning folio to
	 * buddy allocator.
	 */
	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
			 mapping_can_writeback(mapping)))
		folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __filemap_remove_folio(struct folio *folio, void *shadow)
{
	struct address_space *mapping = folio->mapping;

	trace_mm_filemap_delete_from_page_cache(folio);
	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);
}

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
	void (*free_folio)(struct folio *);

	free_folio = mapping->a_ops->free_folio;
	if (free_folio)
		free_folio(folio);

	folio_put_refs(folio, folio_nr_pages(folio));
}

/**
 * filemap_remove_folio - Remove folio from page cache.
 * @folio: The folio.
 *
 * This must be called only on folios that are locked and have been
 * verified to be in the page cache.  It will never put the folio into
 * the free list because the caller has a reference on the page.
 */
void filemap_remove_folio(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
}

/*
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * @fbatch: batch of folios to delete
 *
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * by page index and is optimised for it to be dense.
 * It tolerates holes in @fbatch (mapping entries at those indices are not
 * modified).
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
	long total_pages = 0;
	int i = 0;
	struct folio *folio;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (i >= folio_batch_count(fbatch))
			break;

		/* A swap/dax/shadow entry got inserted? Skip it. */
		if (xa_is_value(folio))
			continue;
		/*
		 * A page got inserted in our range? Skip it. We have our
		 * pages locked so they are protected from being removed.
		 * If we see a page whose index is higher than ours, it
		 * means our page has been removed, which shouldn't be
		 * possible because we're holding the PageLock.
		 */
		if (folio != fbatch->folios[i]) {
			VM_BUG_ON_FOLIO(folio->index >
					fbatch->folios[i]->index, folio);
			continue;
		}

		WARN_ON_ONCE(!folio_test_locked(folio));

		folio->mapping = NULL;
		/* Leave folio->index set: truncation lookup relies on it */

		i++;
		xas_store(&xas, NULL);
		total_pages += folio_nr_pages(folio);
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch)
{
	int i;

	if (!folio_batch_count(fbatch))
		return;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		trace_mm_filemap_delete_from_page_cache(folio);
		filemap_unaccount_folio(mapping, folio);
	}
	page_cache_delete_batch(mapping, fbatch);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	for (i = 0; i < folio_batch_count(fbatch); i++)
		filemap_free_folio(mapping, fbatch->folios[i]);
}

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}

/**
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @wbc: the writeback_control controlling the writeout
 *
 * Call writepages on the mapping using the provided wbc to control the
 * writeout.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	int ret;

	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);
	wbc_detach_inode(wbc);
	return ret;
}
EXPORT_SYMBOL(filemap_fdatawrite_wbc);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	return filemap_fdatawrite_wbc(mapping, &wbc);
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_fdatawrite_range_kick - start writeback on a range
 * @mapping: target address_space
 * @start: index to start writeback on
 * @end: last (inclusive) index for writeback
 *
 * This is a non-integrity writeback helper, to start writing back folios
 * for the indicated range.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,
		loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE);
}
EXPORT_SYMBOL_GPL(filemap_fdatawrite_range_kick);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			   loff_t start_byte, loff_t end_byte)
{
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		folio = xas_find(&xas, max);
		if (xas_retry(&xas, folio))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(folio))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway.  It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return folio != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);

static void __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned nr_folios;

	folio_batch_init(&fbatch);

	while (index <= end) {
		unsigned i;

		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				PAGECACHE_TAG_WRITEBACK, &fbatch);

		if (!nr_folios)
			break;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_wait_writeback(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file: file pointing to address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	return mapping->nrpages;
}

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct folio *folio;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	xas_for_each(&xas, folio, max) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
				folio_test_writeback(folio))
			break;
	}
	rcu_read_unlock();
	return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0, err2;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = filemap_check_errors(mapping);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 *				   reported and advance wb_err to current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file: file pointing to address_space with pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_folio - replace a pagecache folio with a new one
 * @old: folio to be replaced
 * @new: folio to replace with
 *
 * This function replaces a folio in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new folio and
 * drops it for the old folio.  Both the old and new folios must be
 * locked.  This function does not add the new folio to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 */
void replace_page_cache_folio(struct folio *old, struct folio *new)
{
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
	VM_BUG_ON_FOLIO(new->mapping, new);

	folio_get(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_replace_folio(old, new);

	xas_lock_irq(&xas);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!folio_test_hugetlb(old))
		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
	if (!folio_test_hugetlb(new))
		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
	if (folio_test_swapbacked(old))
		__lruvec_stat_sub_folio(old, NR_SHMEM);
	if (folio_test_swapbacked(new))
		__lruvec_stat_add_folio(new, NR_SHMEM);
	xas_unlock_irq(&xas);
	if (free_folio)
		free_folio(old);
	folio_put(old);
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);

noinline int __filemap_add_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	bool huge;
	long nr;
	unsigned int forder = folio_order(folio);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
	VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
			folio);
	mapping_set_update(&xas, mapping);

	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
	huge = folio_test_hugetlb(folio);
	nr = folio_nr_pages(folio);

	gfp &= GFP_RECLAIM_MASK;
	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = xas.xa_index;

	for (;;) {
		int order = -1;
		void *entry, *old = NULL;

		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
			/*
			 * If a larger entry exists,
			 * it will be the first and only entry iterated.
			 */
			if (order == -1)
				order = xas_get_order(&xas);
		}

		if (old) {
			if (order > 0 && order > forder) {
				unsigned int split_order = max(forder,
						xas_try_split_min_order(order));

				/* How to handle large swap entries? */
				BUG_ON(shmem_mapping(mapping));

				while (order > forder) {
					xas_set_order(&xas, index, split_order);
					xas_try_split(&xas, old, order);
					if (xas_error(&xas))
						goto unlock;
					order = split_order;
					split_order =
						max(xas_try_split_min_order(
							    split_order),
						    forder);
				}
				xas_reset(&xas);
			}
			if (shadowp)
				*shadowp = old;
		}

		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;

		mapping->nrpages += nr;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge) {
			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
			if (folio_test_pmd_mappable(folio))
				__lruvec_stat_mod_folio(folio,
						NR_FILE_THPS, nr);
		}

unlock:
		xas_unlock_irq(&xas);

		if (!xas_nomem(&xas, gfp))
			break;
	}

	if (xas_error(&xas))
		goto error;

	trace_mm_filemap_add_to_page_cache(folio);
	return 0;
error:
	folio->mapping = NULL;
	/* Leave folio->index set: truncation relies upon it */
	folio_put_refs(folio, nr);
	return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
				pgoff_t index, gfp_t gfp)
{
	void *shadow = NULL;
	int ret;

	ret = mem_cgroup_charge(folio, NULL, gfp);
	if (ret)
		return ret;

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret)) {
		mem_cgroup_uncharge(folio);
		__folio_clear_locked(folio);
	} else {
		/*
		 * The folio might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed folio.
		 * The exception is folios getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(folio_test_active(folio));
		if (!(gfp & __GFP_WRITE) && shadow)
			workingset_refault(folio, shadow);
		folio_add_lru(folio);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(filemap_add_folio);

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
	int n;
	struct folio *folio;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			folio = __folio_alloc_node_noprof(gfp, order, n);
		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));

		return folio;
	}
	return folio_alloc_noprof(gfp, order);
}
EXPORT_SYMBOL(filemap_alloc_folio_noprof);
#endif

/*
 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 *
 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to lock
 * @mapping2: the second mapping to lock
 */
void filemap_invalidate_lock_two(struct address_space *mapping1,
				 struct address_space *mapping2)
{
	if (mapping1 > mapping2)
		swap(mapping1, mapping2);
	if (mapping1)
		down_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		down_write_nested(&mapping2->invalidate_lock, 1);
}
EXPORT_SYMBOL(filemap_invalidate_lock_two);

/*
 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 *
 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to unlock
 * @mapping2: the second mapping to unlock
 */
void filemap_invalidate_unlock_two(struct address_space *mapping1,
				   struct address_space *mapping2)
{
	if (mapping1)
		up_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		up_write(&mapping2->invalidate_lock);
}
EXPORT_SYMBOL(filemap_invalidate_unlock_two);

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *folio_waitqueue(struct folio *folio)
{
	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
}

/* How many times do we accept lock stealing from under a waiter? */
static int sysctl_page_lock_unfairness = 5;
static const struct ctl_table filemap_sysctl_table[] = {
	{
		.procname	= "page_lock_unfairness",
		.data		= &sysctl_page_lock_unfairness,
		.maxlen		= sizeof(sysctl_page_lock_unfairness),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	}
};

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&folio_wait_table[i]);

	page_writeback_init();
	register_sysctl_init("vm", filemap_sysctl_table);
}

/*
 * The page wait code treats the "wait->flags" somewhat unusually, because
 * we have multiple different kinds of waits, not just the usual "exclusive"
 * one.
 *
 * We have:
 *
 * (a) no special bits set:
 *
 *	We're just waiting for the bit to be released, and when a waker
 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
 *	and remove it from the wait queue.
 *
 *	Simple and straightforward.
 *
 * (b) WQ_FLAG_EXCLUSIVE:
 *
 *	The waiter is waiting to get the lock, and only one waiter should
 *	be woken up to avoid any thundering herd behavior. We'll set the
 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
 *
 *	This is the traditional exclusive wait.
 *
 * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
 *
 *	The waiter is waiting to get the bit, and additionally wants the
 *	lock to be transferred to it for fair lock behavior. If the lock
 *	cannot be taken, we stop walking the wait queue without waking
 *	the waiter.
 *
 *	This is the "fair lock handoff" case, and in addition to setting
 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
 *	that it now has the lock.
 */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	unsigned int flags;
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wait_page, key))
		return 0;

	/*
	 * If it's a lock handoff wait, we get the bit for it, and
	 * stop walking (and do not wake it up) if we can't.
	 */
	flags = wait->flags;
	if (flags & WQ_FLAG_EXCLUSIVE) {
		if (test_bit(key->bit_nr, &key->folio->flags.f))
			return -1;
		if (flags & WQ_FLAG_CUSTOM) {
			if (test_and_set_bit(key->bit_nr, &key->folio->flags.f))
				return -1;
			flags |= WQ_FLAG_DONE;
		}
	}

	/*
	 * We are holding the wait-queue lock, but the waiter that
	 * is waiting for this will be checking the flags without
	 * any locking.
	 *
	 * So update the flags atomically, and wake up the waiter
	 * afterwards to avoid any races. This store-release pairs
	 * with the load-acquire in folio_wait_bit_common().
	 */
	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
	wake_up_state(wait->private, mode);

	/*
	 * Ok, we have successfully done what we're waiting for,
	 * and we can unconditionally remove the wait entry.
	 *
	 * Note that this pairs with the "finish_wait()" in the
	 * waiter, and has to be the absolute last thing we do.
	 * After this list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	 * exited.
	 */
	list_del_init_careful(&wait->entry);
	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}

static void folio_wake_bit(struct folio *folio, int bit_nr)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	struct wait_page_key key;
	unsigned long flags;

	key.folio = folio;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key(q, TASK_NORMAL, &key);

	/*
	 * It's possible to miss clearing waiters here, when we woke our page
	 * waiters, but the hashed waitqueue has waiters for other pages on it.
	 * That's okay, it's a rare case. The next waker will clear it.
	 *
	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
	 * other), the flag may be cleared in the course of freeing the page;
	 * but that is not required for correctness.
	 */
	if (!waitqueue_active(q) || !key.page_match)
		folio_clear_waiters(folio);

	spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * A choice of three behaviors for folio_wait_bit_common():
 */
enum behavior {
	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
			 * __folio_lock() waiting on then setting PG_locked.
			 */
	SHARED,		/* Hold ref to page and check the bit when woken, like
			 * folio_wait_writeback() waiting on PG_writeback.
			 */
	DROP,		/* Drop ref to page before wait, no check when woken,
			 * like folio_put_wait_locked() on PG_locked.
			 */
};

/*
 * Attempt to check (or get) the folio flag, and mark us done
 * if successful.
 */
static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
					struct wait_queue_entry *wait)
{
	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
		if (test_and_set_bit(bit_nr, &folio->flags.f))
			return false;
	} else if (test_bit(bit_nr, &folio->flags.f))
		return false;

	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
	return true;
}

static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
		int state, enum behavior behavior)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	int unfairness = sysctl_page_lock_unfairness;
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;

	if (bit_nr == PG_locked &&
	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = bit_nr;

repeat:
	wait->flags = 0;
	if (behavior == EXCLUSIVE) {
		wait->flags = WQ_FLAG_EXCLUSIVE;
		if (--unfairness < 0)
			wait->flags |= WQ_FLAG_CUSTOM;
	}

	/*
	 * Do one last check whether we can get the
	 * page bit synchronously.
	 *
	 * Do the folio_set_waiters() marking before that
	 * to let any waker we _just_ missed know they
	 * need to wake us up (otherwise they'll never
	 * even go to the slow case that looks at the
	 * page queue), and add ourselves to the wait
	 * queue if we need to sleep.
	 *
	 * This part needs to be done under the queue
	 * lock to avoid races.
	 */
	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, bit_nr, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * From now on, all the logic will be based on
	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
	 * see whether the page bit testing has already
	 * been done by the wake function.
	 *
	 * We can drop our reference to the folio.
	 */
	if (behavior == DROP)
		folio_put(folio);

	/*
	 * Note that until the "finish_wait()", or until
	 * we see the WQ_FLAG_WOKEN flag, we need to
	 * be very careful with the 'wait->flags', because
	 * we may race with a waker that sets them.
	 */
	for (;;) {
		unsigned int flags;

		set_current_state(state);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(state, current))
				break;

			io_schedule();
			continue;
		}

		/* If we were non-exclusive, we're done */
		if (behavior != EXCLUSIVE)
			break;

		/* If the waker got the lock for us, we're done */
		if (flags & WQ_FLAG_DONE)
			break;

		/*
		 * Otherwise, if we're getting the lock, we need to
		 * try to get it ourselves.
		 *
		 * And if that fails, we'll have to retry this all.
		 */
		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
			goto repeat;

		wait->flags |= WQ_FLAG_DONE;
		break;
	}

	/*
	 * If a signal happened, this 'finish_wait()' may remove the last
	 * waiter from the wait-queues, but the folio waiters bit will remain
	 * set. That's ok. The next wakeup will take care of it, and trying
	 * to do it here would be difficult and prone to races.
	 */
	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}

	/*
	 * NOTE! The wait->flags weren't stable until we've done the
	 * 'finish_wait()', and we could have exited the loop above due
	 * to a signal, and had a wakeup event happen after the signal
	 * test but before the 'finish_wait()'.
	 *
	 * So only after the finish_wait() can we reliably determine
	 * if we got woken up or not, so we can now figure out the final
	 * return value based on that state without races.
	 *
	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
	 */
	if (behavior == EXCLUSIVE)
		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;

	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}

#ifdef CONFIG_MIGRATION
/**
 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
 * @entry: migration swap entry.
 * @ptl: already locked ptl. This function will drop the lock.
 *
 * Wait for a migration entry referencing the given page to be removed. This is
 * equivalent to folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE) except
 * this can be called without taking a reference on the page. Instead this
 * should be called while holding the ptl for the migration entry referencing
 * the page.
 *
 * Returns after unlocking the ptl.
 *
 * This follows the same logic as folio_wait_bit_common() so see the comments
 * there.
 */
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
	__releases(ptl)
{
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;
	wait_queue_head_t *q;
	struct folio *folio = pfn_swap_entry_folio(entry);

	q = folio_waitqueue(folio);
	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = PG_locked;
	wait->flags = 0;

	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, PG_locked, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * If a migration entry exists for the page the migration path must hold
	 * a valid reference to the page, and it must take the ptl to remove the
	 * migration entry. So the page is valid until the ptl is dropped.
	 */
	spin_unlock(ptl);

	for (;;) {
		unsigned int flags;

		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
				break;

			io_schedule();
			continue;
		}
		break;
	}

	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
}
#endif

void folio_wait_bit(struct folio *folio, int bit_nr)
{
	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit);

int folio_wait_bit_killable(struct folio *folio, int bit_nr)
{
	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit_killable);

/**
 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
 * @folio: The folio to wait for.
 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
 *
 * The caller should hold a reference on @folio.  They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the folio to
 * come unlocked.  After this function returns, the caller should not
 * dereference @folio.
 *
 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
 */
static int folio_put_wait_locked(struct folio *folio, int state)
{
	return folio_wait_bit_common(folio, PG_locked, state, DROP);
}

/**
 * folio_unlock - Unlock a locked folio.
 * @folio: The folio.
 *
 * Unlocks the folio and wakes up any thread sleeping on the page lock.
 *
 * Context: May be called from interrupt or process context.  May not be
 * called from NMI context.
 */
void folio_unlock(struct folio *folio)
{
	/* Bit 7 allows x86 to check the byte's sign bit */
	BUILD_BUG_ON(PG_waiters != 7);
	BUILD_BUG_ON(PG_locked > 7);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
		folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_unlock);

/**
 * folio_end_read - End read on a folio.
 * @folio: The folio.
 * @success: True if all reads completed successfully.
 *
 * When all reads against a folio have completed, filesystems should
 * call this function to let the pagecache know that no more reads
 * are outstanding.  This will unlock the folio and wake up any thread
 * sleeping on the lock.  The folio will also be marked uptodate if all
 * reads succeeded.
 *
 * Context: May be called from interrupt or process context.  May not be
 * called from NMI context.
 */
void folio_end_read(struct folio *folio, bool success)
{
	unsigned long mask = 1 << PG_locked;

	/* Must be in bottom byte for x86 to work */
	BUILD_BUG_ON(PG_uptodate > 7);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(success && folio_test_uptodate(folio), folio);

	if (likely(success))
		mask |= 1 << PG_uptodate;
	if (folio_xor_flags_has_waiters(folio, mask))
		folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_end_read);

/**
 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
 * @folio: The folio.
 *
 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
 * it.  The folio reference held for PG_private_2 being set is released.
 *
 * This is, for example, used when a netfs folio is being written to a local
 * disk cache, thereby allowing writes to the cache for the same folio to be
 * serialised.
 */
void folio_end_private_2(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
	folio_wake_bit(folio, PG_private_2);
	folio_put(folio);
}
EXPORT_SYMBOL(folio_end_private_2);

/**
 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 to be cleared on a folio.
 */
void folio_wait_private_2(struct folio *folio)
{
	while (folio_test_private_2(folio))
		folio_wait_bit(folio, PG_private_2);
}
EXPORT_SYMBOL(folio_wait_private_2);

/**
 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is
 * received by the calling task.
 *
 * Return:
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.
 */
int folio_wait_private_2_killable(struct folio *folio)
{
	int ret = 0;

	while (folio_test_private_2(folio)) {
		ret = folio_wait_bit_killable(folio, PG_private_2);
		if (ret < 0)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(folio_wait_private_2_killable);

static void filemap_end_dropbehind(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return;
	if (!folio_test_clear_dropbehind(folio))
		return;
	if (mapping)
		folio_unmap_invalidate(mapping, folio, 0);
}

/*
 * If folio was marked as dropbehind, then pages should be dropped when writeback
 * completes. Do that now. If we fail, it's likely because of a big folio -
 * just reset dropbehind for that case and later completions should invalidate.
 */
static void filemap_end_dropbehind_write(struct folio *folio)
{
	if (!folio_test_dropbehind(folio))
		return;

	/*
	 * Hitting !in_task() should not happen off RWF_DONTCACHE writeback,
	 * but can happen if normal writeback just happens to find dirty folios
	 * that were created as part of uncached writeback, and that writeback
	 * would otherwise not need non-IRQ handling. Just skip the
	 * invalidation in that case.
	 */
	if (in_task() && folio_trylock(folio)) {
		filemap_end_dropbehind(folio);
		folio_unlock(folio);
	}
}

/**
 * folio_end_writeback - End writeback against a folio.
 * @folio: The folio.
 *
 * The folio must actually be under writeback.
 *
 * Context: May be called from process or interrupt context.
 */
void folio_end_writeback(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);

	/*
	 * folio_test_clear_reclaim() could be used here but it is an
	 * atomic operation and overkill in this particular case. Failing
	 * to shuffle a folio marked for immediate reclaim is too mild
	 * a gain to justify taking an atomic operation penalty at the
	 * end of every folio writeback.
	 */
	if (folio_test_reclaim(folio)) {
		folio_clear_reclaim(folio);
		folio_rotate_reclaimable(folio);
	}

	/*
	 * Writeback does not hold a folio reference of its own, relying
	 * on truncation to wait for the clearing of PG_writeback.
	 * But here we must make sure that the folio is not freed and
	 * reused before the folio_wake_bit().
	 */
	folio_get(folio);
	if (__folio_end_writeback(folio))
		folio_wake_bit(folio, PG_writeback);

	filemap_end_dropbehind_write(folio);
	acct_reclaim_writeback(folio);
	folio_put(folio);
}
EXPORT_SYMBOL(folio_end_writeback);

/**
 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
 * @folio: The folio to lock
 */
void __folio_lock(struct folio *folio)
{
	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
				EXCLUSIVE);
}
EXPORT_SYMBOL(__folio_lock);

int __folio_lock_killable(struct folio *folio)
{
	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
					EXCLUSIVE);
}
EXPORT_SYMBOL_GPL(__folio_lock_killable);

static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
{
	struct wait_queue_head *q = folio_waitqueue(folio);
	int ret;

	wait->folio = folio;
	wait->bit_nr = PG_locked;

	spin_lock_irq(&q->lock);
	__add_wait_queue_entry_tail(q, &wait->wait);
	folio_set_waiters(folio);
	ret = !folio_trylock(folio);
	/*
	 * If we were successful now, we know we're still on the
	 * waitqueue as we're still under the lock. This means it's
	 * safe to remove and return success, we know the callback
	 * isn't going to trigger.
	 */
	if (!ret)
		__remove_wait_queue(q, &wait->wait);
	else
		ret = -EIOCBQUEUED;
	spin_unlock_irq(&q->lock);
	return ret;
}

/*
 * Return values:
 * 0 - folio is locked.
 * non-zero - folio is not locked.
 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
 *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
 *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
 */
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
{
	unsigned int flags = vmf->flags;

	if (fault_flag_allow_retry_first(flags)) {
		/*
		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
		 * released even though returning VM_FAULT_RETRY.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		release_fault_lock(vmf);
		if (flags & FAULT_FLAG_KILLABLE)
			folio_wait_locked_killable(folio);
		else
			folio_wait_locked(folio);
		return VM_FAULT_RETRY;
	}
	if (flags & FAULT_FLAG_KILLABLE) {
		bool ret;

		ret = __folio_lock_killable(folio);
		if (ret) {
			release_fault_lock(vmf);
			return VM_FAULT_RETRY;
		}
	} else {
		__folio_lock(folio);
	}

	return 0;
}

/**
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * gap with the lowest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 5, then subsequently a gap is
 * created at index 10, page_cache_next_miss covering both indices may
 * return 10 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.
 */
pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);
	unsigned long nr = max_scan;

	while (nr--) {
		void *entry = xas_next(&xas);
		if (!entry || xa_is_value(entry))
			return xas.xa_index;
		if (xas.xa_index == 0)
			return 0;
	}

	return index + max_scan;
}
EXPORT_SYMBOL(page_cache_next_miss);

/**
 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * gap with the highest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 10, then subsequently a gap is
 * created at index 5, page_cache_prev_miss() covering both indices may
 * return 5 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.
 */
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_prev(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == ULONG_MAX)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_prev_miss);

/*
 * Lockless page cache protocol:
 * On the lookup side:
 * 1. Load the folio from i_pages
 * 2. Increment the refcount if it's not zero
 * 3. If the folio is not found by xas_reload(), put the refcount and retry
 *
 * On the removal side:
 * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
Freeze the page (by zeroing the refcount if nobody else has a reference) 1840 * B. Remove the page from i_pages 1841 * C. Return the page to the page allocator 1842 * 1843 * This means that any page may have its reference count temporarily 1844 * increased by a speculative page cache (or GUP-fast) lookup as it can 1845 * be allocated by another user before the RCU grace period expires. 1846 * Because the refcount temporarily acquired here may end up being the 1847 * last refcount on the page, any page allocation must be freeable by 1848 * folio_put(). 1849 */ 1850 1851 /* 1852 * filemap_get_entry - Get a page cache entry. 1853 * @mapping: the address_space to search 1854 * @index: The page cache index. 1855 * 1856 * Looks up the page cache entry at @mapping & @index. If it is a folio, 1857 * it is returned with an increased refcount. If it is a shadow entry 1858 * of a previously evicted folio, or a swap entry from shmem/tmpfs, 1859 * it is returned without further action. 1860 * 1861 * Return: The folio, swap or shadow entry, %NULL if nothing is found. 1862 */ 1863 void *filemap_get_entry(struct address_space *mapping, pgoff_t index) 1864 { 1865 XA_STATE(xas, &mapping->i_pages, index); 1866 struct folio *folio; 1867 1868 rcu_read_lock(); 1869 repeat: 1870 xas_reset(&xas); 1871 folio = xas_load(&xas); 1872 if (xas_retry(&xas, folio)) 1873 goto repeat; 1874 /* 1875 * A shadow entry of a recently evicted page, or a swap entry from 1876 * shmem/tmpfs. Return it without attempting to raise page count. 1877 */ 1878 if (!folio || xa_is_value(folio)) 1879 goto out; 1880 1881 if (!folio_try_get(folio)) 1882 goto repeat; 1883 1884 if (unlikely(folio != xas_reload(&xas))) { 1885 folio_put(folio); 1886 goto repeat; 1887 } 1888 out: 1889 rcu_read_unlock(); 1890 1891 return folio; 1892 } 1893 1894 /** 1895 * __filemap_get_folio - Find and get a reference to a folio. 1896 * @mapping: The address_space to search. 1897 * @index: The page index. 1898 * @fgp_flags: %FGP flags modify how the folio is returned. 1899 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified. 1900 * 1901 * Looks up the page cache entry at @mapping & @index. 1902 * 1903 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even 1904 * if the %GFP flags specified for %FGP_CREAT are atomic. 1905 * 1906 * If this function returns a folio, it is returned with an increased refcount. 1907 * 1908 * Return: The found folio or an ERR_PTR() otherwise. 1909 */ 1910 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, 1911 fgf_t fgp_flags, gfp_t gfp) 1912 { 1913 struct folio *folio; 1914 1915 repeat: 1916 folio = filemap_get_entry(mapping, index); 1917 if (xa_is_value(folio)) 1918 folio = NULL; 1919 if (!folio) 1920 goto no_page; 1921 1922 if (fgp_flags & FGP_LOCK) { 1923 if (fgp_flags & FGP_NOWAIT) { 1924 if (!folio_trylock(folio)) { 1925 folio_put(folio); 1926 return ERR_PTR(-EAGAIN); 1927 } 1928 } else { 1929 folio_lock(folio); 1930 } 1931 1932 /* Has the page been truncated? 
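 * The folio can have been removed from the page cache between the
 * lockless lookup above and the folio_lock() we just took: removal
 * clears folio->mapping, so recheck it under the folio lock and redo
 * the lookup if it no longer matches.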
*/ 1933 if (unlikely(folio->mapping != mapping)) { 1934 folio_unlock(folio); 1935 folio_put(folio); 1936 goto repeat; 1937 } 1938 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); 1939 } 1940 1941 if (fgp_flags & FGP_ACCESSED) 1942 folio_mark_accessed(folio); 1943 else if (fgp_flags & FGP_WRITE) { 1944 /* Clear idle flag for buffer write */ 1945 if (folio_test_idle(folio)) 1946 folio_clear_idle(folio); 1947 } 1948 1949 if (fgp_flags & FGP_STABLE) 1950 folio_wait_stable(folio); 1951 no_page: 1952 if (!folio && (fgp_flags & FGP_CREAT)) { 1953 unsigned int min_order = mapping_min_folio_order(mapping); 1954 unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags)); 1955 int err; 1956 index = mapping_align_index(mapping, index); 1957 1958 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) 1959 gfp |= __GFP_WRITE; 1960 if (fgp_flags & FGP_NOFS) 1961 gfp &= ~__GFP_FS; 1962 if (fgp_flags & FGP_NOWAIT) { 1963 gfp &= ~GFP_KERNEL; 1964 gfp |= GFP_NOWAIT; 1965 } 1966 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) 1967 fgp_flags |= FGP_LOCK; 1968 1969 if (order > mapping_max_folio_order(mapping)) 1970 order = mapping_max_folio_order(mapping); 1971 /* If we're not aligned, allocate a smaller folio */ 1972 if (index & ((1UL << order) - 1)) 1973 order = __ffs(index); 1974 1975 do { 1976 gfp_t alloc_gfp = gfp; 1977 1978 err = -ENOMEM; 1979 if (order > min_order) 1980 alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; 1981 folio = filemap_alloc_folio(alloc_gfp, order); 1982 if (!folio) 1983 continue; 1984 1985 /* Init accessed so avoid atomic mark_page_accessed later */ 1986 if (fgp_flags & FGP_ACCESSED) 1987 __folio_set_referenced(folio); 1988 if (fgp_flags & FGP_DONTCACHE) 1989 __folio_set_dropbehind(folio); 1990 1991 err = filemap_add_folio(mapping, folio, index, gfp); 1992 if (!err) 1993 break; 1994 folio_put(folio); 1995 folio = NULL; 1996 } while (order-- > min_order); 1997 1998 if (err == -EEXIST) 1999 goto repeat; 2000 if (err) { 2001 /* 2002 * When NOWAIT I/O fails to allocate folios this could 2003 * be due to a nonblocking memory allocation and not 2004 * because the system actually is out of memory. 2005 * Return -EAGAIN so that the caller retries in a 2006 * blocking fashion instead of propagating -ENOMEM 2007 * to the application. 2008 */ 2009 if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM) 2010 err = -EAGAIN; 2011 return ERR_PTR(err); 2012 } 2013 /* 2014 * filemap_add_folio locks the page, and for mmap 2015 * we expect an unlocked page. 2016 */ 2017 if (folio && (fgp_flags & FGP_FOR_MMAP)) 2018 folio_unlock(folio); 2019 } 2020 2021 if (!folio) 2022 return ERR_PTR(-ENOENT); 2023 /* not an uncached lookup, clear uncached if set */ 2024 if (folio_test_dropbehind(folio) && !(fgp_flags & FGP_DONTCACHE)) 2025 folio_clear_dropbehind(folio); 2026 return folio; 2027 } 2028 EXPORT_SYMBOL(__filemap_get_folio); 2029 2030 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, 2031 xa_mark_t mark) 2032 { 2033 struct folio *folio; 2034 2035 retry: 2036 if (mark == XA_PRESENT) 2037 folio = xas_find(xas, max); 2038 else 2039 folio = xas_find_marked(xas, max, mark); 2040 2041 if (xas_retry(xas, folio)) 2042 goto retry; 2043 /* 2044 * A shadow entry of a recently evicted page, a swap 2045 * entry from shmem/tmpfs or a DAX entry. Return it 2046 * without attempting to raise page count.
2047 */ 2048 if (!folio || xa_is_value(folio)) 2049 return folio; 2050 2051 if (!folio_try_get(folio)) 2052 goto reset; 2053 2054 if (unlikely(folio != xas_reload(xas))) { 2055 folio_put(folio); 2056 goto reset; 2057 } 2058 2059 return folio; 2060 reset: 2061 xas_reset(xas); 2062 goto retry; 2063 } 2064 2065 /** 2066 * find_get_entries - gang pagecache lookup 2067 * @mapping: The address_space to search 2068 * @start: The starting page cache index 2069 * @end: The final page index (inclusive). 2070 * @fbatch: Where the resulting entries are placed. 2071 * @indices: The cache indices corresponding to the entries in @entries 2072 * 2073 * find_get_entries() will search for and return a batch of entries in 2074 * the mapping. The entries are placed in @fbatch. find_get_entries() 2075 * takes a reference on any actual folios it returns. 2076 * 2077 * The entries have ascending indexes. The indices may not be consecutive 2078 * due to not-present entries or large folios. 2079 * 2080 * Any shadow entries of evicted folios, or swap entries from 2081 * shmem/tmpfs, are included in the returned array. 2082 * 2083 * Return: The number of entries which were found. 2084 */ 2085 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, 2086 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) 2087 { 2088 XA_STATE(xas, &mapping->i_pages, *start); 2089 struct folio *folio; 2090 2091 rcu_read_lock(); 2092 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { 2093 indices[fbatch->nr] = xas.xa_index; 2094 if (!folio_batch_add(fbatch, folio)) 2095 break; 2096 } 2097 2098 if (folio_batch_count(fbatch)) { 2099 unsigned long nr; 2100 int idx = folio_batch_count(fbatch) - 1; 2101 2102 folio = fbatch->folios[idx]; 2103 if (!xa_is_value(folio)) 2104 nr = folio_nr_pages(folio); 2105 else 2106 nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]); 2107 *start = round_down(indices[idx] + nr, nr); 2108 } 2109 rcu_read_unlock(); 2110 2111 return folio_batch_count(fbatch); 2112 } 2113 2114 /** 2115 * find_lock_entries - Find a batch of pagecache entries. 2116 * @mapping: The address_space to search. 2117 * @start: The starting page cache index. 2118 * @end: The final page index (inclusive). 2119 * @fbatch: Where the resulting entries are placed. 2120 * @indices: The cache indices of the entries in @fbatch. 2121 * 2122 * find_lock_entries() will return a batch of entries from @mapping. 2123 * Swap, shadow and DAX entries are included. Folios are returned 2124 * locked and with an incremented refcount. Folios which are locked 2125 * by somebody else or under writeback are skipped. Folios which are 2126 * partially outside the range are not returned. 2127 * 2128 * The entries have ascending indexes. The indices may not be consecutive 2129 * due to not-present entries, large folios, folios which could not be 2130 * locked or folios under writeback. 2131 * 2132 * Return: The number of entries which were found. 
2133 */ 2134 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, 2135 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) 2136 { 2137 XA_STATE(xas, &mapping->i_pages, *start); 2138 struct folio *folio; 2139 2140 rcu_read_lock(); 2141 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { 2142 unsigned long base; 2143 unsigned long nr; 2144 2145 if (!xa_is_value(folio)) { 2146 nr = folio_nr_pages(folio); 2147 base = folio->index; 2148 /* Omit large folio which begins before the start */ 2149 if (base < *start) 2150 goto put; 2151 /* Omit large folio which extends beyond the end */ 2152 if (base + nr - 1 > end) 2153 goto put; 2154 if (!folio_trylock(folio)) 2155 goto put; 2156 if (folio->mapping != mapping || 2157 folio_test_writeback(folio)) 2158 goto unlock; 2159 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), 2160 folio); 2161 } else { 2162 nr = 1 << xas_get_order(&xas); 2163 base = xas.xa_index & ~(nr - 1); 2164 /* Omit order>0 value which begins before the start */ 2165 if (base < *start) 2166 continue; 2167 /* Omit order>0 value which extends beyond the end */ 2168 if (base + nr - 1 > end) 2169 break; 2170 } 2171 2172 /* Update start now so that last update is correct on return */ 2173 *start = base + nr; 2174 indices[fbatch->nr] = xas.xa_index; 2175 if (!folio_batch_add(fbatch, folio)) 2176 break; 2177 continue; 2178 unlock: 2179 folio_unlock(folio); 2180 put: 2181 folio_put(folio); 2182 } 2183 rcu_read_unlock(); 2184 2185 return folio_batch_count(fbatch); 2186 } 2187 2188 /** 2189 * filemap_get_folios - Get a batch of folios 2190 * @mapping: The address_space to search 2191 * @start: The starting page index 2192 * @end: The final page index (inclusive) 2193 * @fbatch: The batch to fill. 2194 * 2195 * Search for and return a batch of folios in the mapping starting at 2196 * index @start and up to index @end (inclusive). The folios are returned 2197 * in @fbatch with an elevated reference count. 2198 * 2199 * Return: The number of folios which were found. 2200 * We also update @start to index the next folio for the traversal. 2201 */ 2202 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, 2203 pgoff_t end, struct folio_batch *fbatch) 2204 { 2205 return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch); 2206 } 2207 EXPORT_SYMBOL(filemap_get_folios); 2208 2209 /** 2210 * filemap_get_folios_contig - Get a batch of contiguous folios 2211 * @mapping: The address_space to search 2212 * @start: The starting page index 2213 * @end: The final page index (inclusive) 2214 * @fbatch: The batch to fill 2215 * 2216 * filemap_get_folios_contig() works exactly like filemap_get_folios(), 2217 * except the returned folios are guaranteed to be contiguous. This may 2218 * not return all contiguous folios if the batch gets filled up. 2219 * 2220 * Return: The number of folios found. 2221 * Also update @start to be positioned for traversal of the next folio. 2222 */ 2223 2224 unsigned filemap_get_folios_contig(struct address_space *mapping, 2225 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) 2226 { 2227 XA_STATE(xas, &mapping->i_pages, *start); 2228 unsigned long nr; 2229 struct folio *folio; 2230 2231 rcu_read_lock(); 2232 2233 for (folio = xas_load(&xas); folio && xas.xa_index <= end; 2234 folio = xas_next(&xas)) { 2235 if (xas_retry(&xas, folio)) 2236 continue; 2237 /* 2238 * If the entry has been swapped out, we can stop looking. 2239 * No current caller is looking for DAX entries. 
2240 */ 2241 if (xa_is_value(folio)) 2242 goto update_start; 2243 2244 /* If we landed in the middle of a THP, continue at its end. */ 2245 if (xa_is_sibling(folio)) 2246 goto update_start; 2247 2248 if (!folio_try_get(folio)) 2249 goto retry; 2250 2251 if (unlikely(folio != xas_reload(&xas))) 2252 goto put_folio; 2253 2254 if (!folio_batch_add(fbatch, folio)) { 2255 nr = folio_nr_pages(folio); 2256 *start = folio->index + nr; 2257 goto out; 2258 } 2259 xas_advance(&xas, folio_next_index(folio) - 1); 2260 continue; 2261 put_folio: 2262 folio_put(folio); 2263 2264 retry: 2265 xas_reset(&xas); 2266 } 2267 2268 update_start: 2269 nr = folio_batch_count(fbatch); 2270 2271 if (nr) { 2272 folio = fbatch->folios[nr - 1]; 2273 *start = folio_next_index(folio); 2274 } 2275 out: 2276 rcu_read_unlock(); 2277 return folio_batch_count(fbatch); 2278 } 2279 EXPORT_SYMBOL(filemap_get_folios_contig); 2280 2281 /** 2282 * filemap_get_folios_tag - Get a batch of folios matching @tag 2283 * @mapping: The address_space to search 2284 * @start: The starting page index 2285 * @end: The final page index (inclusive) 2286 * @tag: The tag index 2287 * @fbatch: The batch to fill 2288 * 2289 * The first folio may start before @start; if it does, it will contain 2290 * @start. The final folio may extend beyond @end; if it does, it will 2291 * contain @end. The folios have ascending indices. There may be gaps 2292 * between the folios if there are indices which have no folio in the 2293 * page cache. If folios are added to or removed from the page cache 2294 * while this is running, they may or may not be found by this call. 2295 * Only returns folios that are tagged with @tag. 2296 * 2297 * Return: The number of folios found. 2298 * Also update @start to index the next folio for traversal. 2299 */ 2300 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, 2301 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch) 2302 { 2303 XA_STATE(xas, &mapping->i_pages, *start); 2304 struct folio *folio; 2305 2306 rcu_read_lock(); 2307 while ((folio = find_get_entry(&xas, end, tag)) != NULL) { 2308 /* 2309 * Shadow entries should never be tagged, but this iteration 2310 * is lockless so there is a window for page reclaim to evict 2311 * a page we saw tagged. Skip over it. 2312 */ 2313 if (xa_is_value(folio)) 2314 continue; 2315 if (!folio_batch_add(fbatch, folio)) { 2316 unsigned long nr = folio_nr_pages(folio); 2317 *start = folio->index + nr; 2318 goto out; 2319 } 2320 } 2321 /* 2322 * We come here when there is no page beyond @end. We take care to not 2323 * overflow the index @start as it confuses some of the callers. This 2324 * breaks the iteration when there is a page at index -1 but that is 2325 * already broken anyway. 2326 */ 2327 if (end == (pgoff_t)-1) 2328 *start = (pgoff_t)-1; 2329 else 2330 *start = end + 1; 2331 out: 2332 rcu_read_unlock(); 2333 2334 return folio_batch_count(fbatch); 2335 } 2336 EXPORT_SYMBOL(filemap_get_folios_tag); 2337 2338 /* 2339 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 2340 * a _large_ part of the i/o request.
Imagine the worst scenario: 2341 * 2342 * ---R__________________________________________B__________ 2343 * ^ reading here ^ bad block(assume 4k) 2344 * 2345 * read(R) => miss => readahead(R...B) => media error => frustrating retries 2346 * => failing the whole request => read(R) => read(R+1) => 2347 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 2348 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 2349 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 2350 * 2351 * It is going insane. Fix it by quickly scaling down the readahead size. 2352 */ 2353 static void shrink_readahead_size_eio(struct file_ra_state *ra) 2354 { 2355 ra->ra_pages /= 4; 2356 } 2357 2358 /* 2359 * filemap_get_read_batch - Get a batch of folios for read 2360 * 2361 * Get a batch of folios which represent a contiguous range of bytes in 2362 * the file. No exceptional entries will be returned. If @index is in 2363 * the middle of a folio, the entire folio will be returned. The last 2364 * folio in the batch may have the readahead flag set or the uptodate flag 2365 * clear so that the caller can take the appropriate action. 2366 */ 2367 static void filemap_get_read_batch(struct address_space *mapping, 2368 pgoff_t index, pgoff_t max, struct folio_batch *fbatch) 2369 { 2370 XA_STATE(xas, &mapping->i_pages, index); 2371 struct folio *folio; 2372 2373 rcu_read_lock(); 2374 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { 2375 if (xas_retry(&xas, folio)) 2376 continue; 2377 if (xas.xa_index > max || xa_is_value(folio)) 2378 break; 2379 if (xa_is_sibling(folio)) 2380 break; 2381 if (!folio_try_get(folio)) 2382 goto retry; 2383 2384 if (unlikely(folio != xas_reload(&xas))) 2385 goto put_folio; 2386 2387 if (!folio_batch_add(fbatch, folio)) 2388 break; 2389 if (!folio_test_uptodate(folio)) 2390 break; 2391 if (folio_test_readahead(folio)) 2392 break; 2393 xas_advance(&xas, folio_next_index(folio) - 1); 2394 continue; 2395 put_folio: 2396 folio_put(folio); 2397 retry: 2398 xas_reset(&xas); 2399 } 2400 rcu_read_unlock(); 2401 } 2402 2403 static int filemap_read_folio(struct file *file, filler_t filler, 2404 struct folio *folio) 2405 { 2406 bool workingset = folio_test_workingset(folio); 2407 unsigned long pflags; 2408 int error; 2409 2410 /* Start the actual read. The read will unlock the page. 
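 * If the folio is being refaulted (it was part of the workingset when
 * it was evicted), the time spent in the filler below is accounted as
 * a memory stall via PSI.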
*/ 2411 if (unlikely(workingset)) 2412 psi_memstall_enter(&pflags); 2413 error = filler(file, folio); 2414 if (unlikely(workingset)) 2415 psi_memstall_leave(&pflags); 2416 if (error) 2417 return error; 2418 2419 error = folio_wait_locked_killable(folio); 2420 if (error) 2421 return error; 2422 if (folio_test_uptodate(folio)) 2423 return 0; 2424 if (file) 2425 shrink_readahead_size_eio(&file->f_ra); 2426 return -EIO; 2427 } 2428 2429 static bool filemap_range_uptodate(struct address_space *mapping, 2430 loff_t pos, size_t count, struct folio *folio, 2431 bool need_uptodate) 2432 { 2433 if (folio_test_uptodate(folio)) 2434 return true; 2435 /* pipes can't handle partially uptodate pages */ 2436 if (need_uptodate) 2437 return false; 2438 if (!mapping->a_ops->is_partially_uptodate) 2439 return false; 2440 if (mapping->host->i_blkbits >= folio_shift(folio)) 2441 return false; 2442 2443 if (folio_pos(folio) > pos) { 2444 count -= folio_pos(folio) - pos; 2445 pos = 0; 2446 } else { 2447 pos -= folio_pos(folio); 2448 } 2449 2450 if (pos == 0 && count >= folio_size(folio)) 2451 return false; 2452 2453 return mapping->a_ops->is_partially_uptodate(folio, pos, count); 2454 } 2455 2456 static int filemap_update_page(struct kiocb *iocb, 2457 struct address_space *mapping, size_t count, 2458 struct folio *folio, bool need_uptodate) 2459 { 2460 int error; 2461 2462 if (iocb->ki_flags & IOCB_NOWAIT) { 2463 if (!filemap_invalidate_trylock_shared(mapping)) 2464 return -EAGAIN; 2465 } else { 2466 filemap_invalidate_lock_shared(mapping); 2467 } 2468 2469 if (!folio_trylock(folio)) { 2470 error = -EAGAIN; 2471 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) 2472 goto unlock_mapping; 2473 if (!(iocb->ki_flags & IOCB_WAITQ)) { 2474 filemap_invalidate_unlock_shared(mapping); 2475 /* 2476 * This is where we usually end up waiting for a 2477 * previously submitted readahead to finish. 2478 */ 2479 folio_put_wait_locked(folio, TASK_KILLABLE); 2480 return AOP_TRUNCATED_PAGE; 2481 } 2482 error = __folio_lock_async(folio, iocb->ki_waitq); 2483 if (error) 2484 goto unlock_mapping; 2485 } 2486 2487 error = AOP_TRUNCATED_PAGE; 2488 if (!folio->mapping) 2489 goto unlock; 2490 2491 error = 0; 2492 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, 2493 need_uptodate)) 2494 goto unlock; 2495 2496 error = -EAGAIN; 2497 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) 2498 goto unlock; 2499 2500 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, 2501 folio); 2502 goto unlock_mapping; 2503 unlock: 2504 folio_unlock(folio); 2505 unlock_mapping: 2506 filemap_invalidate_unlock_shared(mapping); 2507 if (error == AOP_TRUNCATED_PAGE) 2508 folio_put(folio); 2509 return error; 2510 } 2511 2512 static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch) 2513 { 2514 struct address_space *mapping = iocb->ki_filp->f_mapping; 2515 struct folio *folio; 2516 int error; 2517 unsigned int min_order = mapping_min_folio_order(mapping); 2518 pgoff_t index; 2519 2520 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) 2521 return -EAGAIN; 2522 2523 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order); 2524 if (!folio) 2525 return -ENOMEM; 2526 if (iocb->ki_flags & IOCB_DONTCACHE) 2527 __folio_set_dropbehind(folio); 2528 2529 /* 2530 * Protect against truncate / hole punch. Grabbing invalidate_lock 2531 * here assures we cannot instantiate and bring uptodate new 2532 * pagecache folios after evicting page cache during truncate 2533 * and before actually freeing blocks. 
Note that we could 2534 * release invalidate_lock after inserting the folio into 2535 * the page cache as the locked folio would then be enough to 2536 * synchronize with hole punching. But there are code paths 2537 * such as filemap_update_page() filling in partially uptodate 2538 * pages or ->readahead() that need to hold invalidate_lock 2539 * while mapping blocks for IO so let's hold the lock here as 2540 * well to keep locking rules simple. 2541 */ 2542 filemap_invalidate_lock_shared(mapping); 2543 index = (iocb->ki_pos >> (PAGE_SHIFT + min_order)) << min_order; 2544 error = filemap_add_folio(mapping, folio, index, 2545 mapping_gfp_constraint(mapping, GFP_KERNEL)); 2546 if (error == -EEXIST) 2547 error = AOP_TRUNCATED_PAGE; 2548 if (error) 2549 goto error; 2550 2551 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, 2552 folio); 2553 if (error) 2554 goto error; 2555 2556 filemap_invalidate_unlock_shared(mapping); 2557 folio_batch_add(fbatch, folio); 2558 return 0; 2559 error: 2560 filemap_invalidate_unlock_shared(mapping); 2561 folio_put(folio); 2562 return error; 2563 } 2564 2565 static int filemap_readahead(struct kiocb *iocb, struct file *file, 2566 struct address_space *mapping, struct folio *folio, 2567 pgoff_t last_index) 2568 { 2569 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); 2570 2571 if (iocb->ki_flags & IOCB_NOIO) 2572 return -EAGAIN; 2573 if (iocb->ki_flags & IOCB_DONTCACHE) 2574 ractl.dropbehind = 1; 2575 page_cache_async_ra(&ractl, folio, last_index - folio->index); 2576 return 0; 2577 } 2578 2579 static int filemap_get_pages(struct kiocb *iocb, size_t count, 2580 struct folio_batch *fbatch, bool need_uptodate) 2581 { 2582 struct file *filp = iocb->ki_filp; 2583 struct address_space *mapping = filp->f_mapping; 2584 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; 2585 pgoff_t last_index; 2586 struct folio *folio; 2587 unsigned int flags; 2588 int err = 0; 2589 2590 /* "last_index" is the index of the page beyond the end of the read */ 2591 last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE); 2592 retry: 2593 if (fatal_signal_pending(current)) 2594 return -EINTR; 2595 2596 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); 2597 if (!folio_batch_count(fbatch)) { 2598 DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index); 2599 2600 if (iocb->ki_flags & IOCB_NOIO) 2601 return -EAGAIN; 2602 if (iocb->ki_flags & IOCB_NOWAIT) 2603 flags = memalloc_noio_save(); 2604 if (iocb->ki_flags & IOCB_DONTCACHE) 2605 ractl.dropbehind = 1; 2606 page_cache_sync_ra(&ractl, last_index - index); 2607 if (iocb->ki_flags & IOCB_NOWAIT) 2608 memalloc_noio_restore(flags); 2609 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); 2610 } 2611 if (!folio_batch_count(fbatch)) { 2612 err = filemap_create_folio(iocb, fbatch); 2613 if (err == AOP_TRUNCATED_PAGE) 2614 goto retry; 2615 return err; 2616 } 2617 2618 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; 2619 if (folio_test_readahead(folio)) { 2620 err = filemap_readahead(iocb, filp, mapping, folio, last_index); 2621 if (err) 2622 goto err; 2623 } 2624 if (!folio_test_uptodate(folio)) { 2625 if (folio_batch_count(fbatch) > 1) { 2626 err = -EAGAIN; 2627 goto err; 2628 } 2629 err = filemap_update_page(iocb, mapping, count, folio, 2630 need_uptodate); 2631 if (err) 2632 goto err; 2633 } 2634 2635 trace_mm_filemap_get_pages(mapping, index, last_index - 1); 2636 return 0; 2637 err: 2638 if (err < 0) 2639 folio_put(folio); 2640 if (likely(--fbatch->nr)) 2641 return 0; 2642 if 
(err == AOP_TRUNCATED_PAGE) 2643 goto retry; 2644 return err; 2645 } 2646 2647 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) 2648 { 2649 unsigned int shift = folio_shift(folio); 2650 2651 return (pos1 >> shift == pos2 >> shift); 2652 } 2653 2654 static void filemap_end_dropbehind_read(struct folio *folio) 2655 { 2656 if (!folio_test_dropbehind(folio)) 2657 return; 2658 if (folio_test_writeback(folio) || folio_test_dirty(folio)) 2659 return; 2660 if (folio_trylock(folio)) { 2661 filemap_end_dropbehind(folio); 2662 folio_unlock(folio); 2663 } 2664 } 2665 2666 /** 2667 * filemap_read - Read data from the page cache. 2668 * @iocb: The iocb to read. 2669 * @iter: Destination for the data. 2670 * @already_read: Number of bytes already read by the caller. 2671 * 2672 * Copies data from the page cache. If the data is not currently present, 2673 * uses the readahead and read_folio address_space operations to fetch it. 2674 * 2675 * Return: Total number of bytes copied, including those already read by 2676 * the caller. If an error happens before any bytes are copied, returns 2677 * a negative error number. 2678 */ 2679 ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, 2680 ssize_t already_read) 2681 { 2682 struct file *filp = iocb->ki_filp; 2683 struct file_ra_state *ra = &filp->f_ra; 2684 struct address_space *mapping = filp->f_mapping; 2685 struct inode *inode = mapping->host; 2686 struct folio_batch fbatch; 2687 int i, error = 0; 2688 bool writably_mapped; 2689 loff_t isize, end_offset; 2690 loff_t last_pos = ra->prev_pos; 2691 2692 if (unlikely(iocb->ki_pos < 0)) 2693 return -EINVAL; 2694 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) 2695 return 0; 2696 if (unlikely(!iov_iter_count(iter))) 2697 return 0; 2698 2699 iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos); 2700 folio_batch_init(&fbatch); 2701 2702 do { 2703 cond_resched(); 2704 2705 /* 2706 * If we've already successfully copied some data, then we 2707 * can no longer safely return -EIOCBQUEUED. Hence mark 2708 * an async read NOWAIT at that point. 2709 */ 2710 if ((iocb->ki_flags & IOCB_WAITQ) && already_read) 2711 iocb->ki_flags |= IOCB_NOWAIT; 2712 2713 if (unlikely(iocb->ki_pos >= i_size_read(inode))) 2714 break; 2715 2716 error = filemap_get_pages(iocb, iter->count, &fbatch, false); 2717 if (error < 0) 2718 break; 2719 2720 /* 2721 * i_size must be checked after we know the pages are Uptodate. 2722 * 2723 * Checking i_size after the check allows us to calculate 2724 * the correct value for "nr", which means the zero-filled 2725 * part of the page is not copied back to userspace (unless 2726 * another truncate extends the file - this is desired though). 2727 */ 2728 isize = i_size_read(inode); 2729 if (unlikely(iocb->ki_pos >= isize)) 2730 goto put_folios; 2731 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); 2732 2733 /* 2734 * Once we start copying data, we don't want to be touching any 2735 * cachelines that might be contended: 2736 */ 2737 writably_mapped = mapping_writably_mapped(mapping); 2738 2739 /* 2740 * When a read accesses the same folio several times, only 2741 * mark it as accessed the first time. 
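 * pos_same_folio() compares the previous and the current read
 * position at folio granularity, so a run of small sequential reads
 * within one large folio only updates the accessed state once.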
2742 */ 2743 if (!pos_same_folio(iocb->ki_pos, last_pos - 1, 2744 fbatch.folios[0])) 2745 folio_mark_accessed(fbatch.folios[0]); 2746 2747 for (i = 0; i < folio_batch_count(&fbatch); i++) { 2748 struct folio *folio = fbatch.folios[i]; 2749 size_t fsize = folio_size(folio); 2750 size_t offset = iocb->ki_pos & (fsize - 1); 2751 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, 2752 fsize - offset); 2753 size_t copied; 2754 2755 if (end_offset < folio_pos(folio)) 2756 break; 2757 if (i > 0) 2758 folio_mark_accessed(folio); 2759 /* 2760 * If users can be writing to this folio using arbitrary 2761 * virtual addresses, take care of potential aliasing 2762 * before reading the folio on the kernel side. 2763 */ 2764 if (writably_mapped) 2765 flush_dcache_folio(folio); 2766 2767 copied = copy_folio_to_iter(folio, offset, bytes, iter); 2768 2769 already_read += copied; 2770 iocb->ki_pos += copied; 2771 last_pos = iocb->ki_pos; 2772 2773 if (copied < bytes) { 2774 error = -EFAULT; 2775 break; 2776 } 2777 } 2778 put_folios: 2779 for (i = 0; i < folio_batch_count(&fbatch); i++) { 2780 struct folio *folio = fbatch.folios[i]; 2781 2782 filemap_end_dropbehind_read(folio); 2783 folio_put(folio); 2784 } 2785 folio_batch_init(&fbatch); 2786 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); 2787 2788 file_accessed(filp); 2789 ra->prev_pos = last_pos; 2790 return already_read ? already_read : error; 2791 } 2792 EXPORT_SYMBOL_GPL(filemap_read); 2793 2794 int kiocb_write_and_wait(struct kiocb *iocb, size_t count) 2795 { 2796 struct address_space *mapping = iocb->ki_filp->f_mapping; 2797 loff_t pos = iocb->ki_pos; 2798 loff_t end = pos + count - 1; 2799 2800 if (iocb->ki_flags & IOCB_NOWAIT) { 2801 if (filemap_range_needs_writeback(mapping, pos, end)) 2802 return -EAGAIN; 2803 return 0; 2804 } 2805 2806 return filemap_write_and_wait_range(mapping, pos, end); 2807 } 2808 EXPORT_SYMBOL_GPL(kiocb_write_and_wait); 2809 2810 int filemap_invalidate_pages(struct address_space *mapping, 2811 loff_t pos, loff_t end, bool nowait) 2812 { 2813 int ret; 2814 2815 if (nowait) { 2816 /* we could block if there are any pages in the range */ 2817 if (filemap_range_has_page(mapping, pos, end)) 2818 return -EAGAIN; 2819 } else { 2820 ret = filemap_write_and_wait_range(mapping, pos, end); 2821 if (ret) 2822 return ret; 2823 } 2824 2825 /* 2826 * After a write we want buffered reads to be sure to go to disk to get 2827 * the new data. We invalidate clean cached page from the region we're 2828 * about to write. We do this *before* the write so that we can return 2829 * without clobbering -EIOCBQUEUED from ->direct_IO(). 2830 */ 2831 return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, 2832 end >> PAGE_SHIFT); 2833 } 2834 2835 int kiocb_invalidate_pages(struct kiocb *iocb, size_t count) 2836 { 2837 struct address_space *mapping = iocb->ki_filp->f_mapping; 2838 2839 return filemap_invalidate_pages(mapping, iocb->ki_pos, 2840 iocb->ki_pos + count - 1, 2841 iocb->ki_flags & IOCB_NOWAIT); 2842 } 2843 EXPORT_SYMBOL_GPL(kiocb_invalidate_pages); 2844 2845 /** 2846 * generic_file_read_iter - generic filesystem read routine 2847 * @iocb: kernel I/O control block 2848 * @iter: destination for the data read 2849 * 2850 * This is the "read_iter()" routine for all filesystems 2851 * that can use the page cache directly. 
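 *
 * Filesystems normally wire this up in their struct file_operations,
 * either directly or behind a thin wrapper that handles DAX or other
 * special cases first. For a hypothetical filesystem "foo" this would
 * look like:
 *
 *	const struct file_operations foo_file_operations = {
 *		.read_iter	= generic_file_read_iter,
 *		...
 *	};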
2852 * 2853 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall 2854 * be returned when no data can be read without waiting for I/O requests 2855 * to complete; it doesn't prevent readahead. 2856 * 2857 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O 2858 * requests shall be made for the read or for readahead. When no data 2859 * can be read, -EAGAIN shall be returned. When readahead would be 2860 * triggered, a partial, possibly empty read shall be returned. 2861 * 2862 * Return: 2863 * * number of bytes copied, even for partial reads 2864 * * negative error code (or 0 if IOCB_NOIO) if nothing was read 2865 */ 2866 ssize_t 2867 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2868 { 2869 size_t count = iov_iter_count(iter); 2870 ssize_t retval = 0; 2871 2872 if (!count) 2873 return 0; /* skip atime */ 2874 2875 if (iocb->ki_flags & IOCB_DIRECT) { 2876 struct file *file = iocb->ki_filp; 2877 struct address_space *mapping = file->f_mapping; 2878 struct inode *inode = mapping->host; 2879 2880 retval = kiocb_write_and_wait(iocb, count); 2881 if (retval < 0) 2882 return retval; 2883 file_accessed(file); 2884 2885 retval = mapping->a_ops->direct_IO(iocb, iter); 2886 if (retval >= 0) { 2887 iocb->ki_pos += retval; 2888 count -= retval; 2889 } 2890 if (retval != -EIOCBQUEUED) 2891 iov_iter_revert(iter, count - iov_iter_count(iter)); 2892 2893 /* 2894 * Btrfs can have a short DIO read if we encounter 2895 * compressed extents, so if there was an error, or if 2896 * we've already read everything we wanted to, or if 2897 * there was a short read because we hit EOF, go ahead 2898 * and return. Otherwise fallthrough to buffered io for 2899 * the rest of the read. Buffered reads will not work for 2900 * DAX files, so don't bother trying. 2901 */ 2902 if (retval < 0 || !count || IS_DAX(inode)) 2903 return retval; 2904 if (iocb->ki_pos >= i_size_read(inode)) 2905 return retval; 2906 } 2907 2908 return filemap_read(iocb, iter, retval); 2909 } 2910 EXPORT_SYMBOL(generic_file_read_iter); 2911 2912 /* 2913 * Splice subpages from a folio into a pipe. 2914 */ 2915 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe, 2916 struct folio *folio, loff_t fpos, size_t size) 2917 { 2918 struct page *page; 2919 size_t spliced = 0, offset = offset_in_folio(folio, fpos); 2920 2921 page = folio_page(folio, offset / PAGE_SIZE); 2922 size = min(size, folio_size(folio) - offset); 2923 offset %= PAGE_SIZE; 2924 2925 while (spliced < size && !pipe_is_full(pipe)) { 2926 struct pipe_buffer *buf = pipe_head_buf(pipe); 2927 size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced); 2928 2929 *buf = (struct pipe_buffer) { 2930 .ops = &page_cache_pipe_buf_ops, 2931 .page = page, 2932 .offset = offset, 2933 .len = part, 2934 }; 2935 folio_get(folio); 2936 pipe->head++; 2937 page++; 2938 spliced += part; 2939 offset = 0; 2940 } 2941 2942 return spliced; 2943 } 2944 2945 /** 2946 * filemap_splice_read - Splice data from a file's pagecache into a pipe 2947 * @in: The file to read from 2948 * @ppos: Pointer to the file position to read from 2949 * @pipe: The pipe to splice into 2950 * @len: The amount to splice 2951 * @flags: The SPLICE_F_* flags 2952 * 2953 * This function gets folios from a file's pagecache and splices them into the 2954 * pipe. Readahead will be called as necessary to fill more folios. This may 2955 * be used for blockdevs also. 
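 * Page-cache based filesystems typically point their ->splice_read()
 * method straight at this function.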
2956 * 2957 * Return: On success, the number of bytes read will be returned and *@ppos 2958 * will be updated if appropriate; 0 will be returned if there is no more data 2959 * to be read; -EAGAIN will be returned if the pipe had no space, and some 2960 * other negative error code will be returned on error. A short read may occur 2961 * if the pipe has insufficient space, we reach the end of the data or we hit a 2962 * hole. 2963 */ 2964 ssize_t filemap_splice_read(struct file *in, loff_t *ppos, 2965 struct pipe_inode_info *pipe, 2966 size_t len, unsigned int flags) 2967 { 2968 struct folio_batch fbatch; 2969 struct kiocb iocb; 2970 size_t total_spliced = 0, used, npages; 2971 loff_t isize, end_offset; 2972 bool writably_mapped; 2973 int i, error = 0; 2974 2975 if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes)) 2976 return 0; 2977 2978 init_sync_kiocb(&iocb, in); 2979 iocb.ki_pos = *ppos; 2980 2981 /* Work out how much data we can actually add into the pipe */ 2982 used = pipe_buf_usage(pipe); 2983 npages = max_t(ssize_t, pipe->max_usage - used, 0); 2984 len = min_t(size_t, len, npages * PAGE_SIZE); 2985 2986 folio_batch_init(&fbatch); 2987 2988 do { 2989 cond_resched(); 2990 2991 if (*ppos >= i_size_read(in->f_mapping->host)) 2992 break; 2993 2994 iocb.ki_pos = *ppos; 2995 error = filemap_get_pages(&iocb, len, &fbatch, true); 2996 if (error < 0) 2997 break; 2998 2999 /* 3000 * i_size must be checked after we know the pages are Uptodate. 3001 * 3002 * Checking i_size after the check allows us to calculate 3003 * the correct value for "nr", which means the zero-filled 3004 * part of the page is not copied back to userspace (unless 3005 * another truncate extends the file - this is desired though). 3006 */ 3007 isize = i_size_read(in->f_mapping->host); 3008 if (unlikely(*ppos >= isize)) 3009 break; 3010 end_offset = min_t(loff_t, isize, *ppos + len); 3011 3012 /* 3013 * Once we start copying data, we don't want to be touching any 3014 * cachelines that might be contended: 3015 */ 3016 writably_mapped = mapping_writably_mapped(in->f_mapping); 3017 3018 for (i = 0; i < folio_batch_count(&fbatch); i++) { 3019 struct folio *folio = fbatch.folios[i]; 3020 size_t n; 3021 3022 if (folio_pos(folio) >= end_offset) 3023 goto out; 3024 folio_mark_accessed(folio); 3025 3026 /* 3027 * If users can be writing to this folio using arbitrary 3028 * virtual addresses, take care of potential aliasing 3029 * before reading the folio on the kernel side. 3030 */ 3031 if (writably_mapped) 3032 flush_dcache_folio(folio); 3033 3034 n = min_t(loff_t, len, isize - *ppos); 3035 n = splice_folio_into_pipe(pipe, folio, *ppos, n); 3036 if (!n) 3037 goto out; 3038 len -= n; 3039 total_spliced += n; 3040 *ppos += n; 3041 in->f_ra.prev_pos = *ppos; 3042 if (pipe_is_full(pipe)) 3043 goto out; 3044 } 3045 3046 folio_batch_release(&fbatch); 3047 } while (len); 3048 3049 out: 3050 folio_batch_release(&fbatch); 3051 file_accessed(in); 3052 3053 return total_spliced ? total_spliced : error; 3054 } 3055 EXPORT_SYMBOL(filemap_splice_read); 3056 3057 static inline loff_t folio_seek_hole_data(struct xa_state *xas, 3058 struct address_space *mapping, struct folio *folio, 3059 loff_t start, loff_t end, bool seek_data) 3060 { 3061 const struct address_space_operations *ops = mapping->a_ops; 3062 size_t offset, bsz = i_blocksize(mapping->host); 3063 3064 if (xa_is_value(folio) || folio_test_uptodate(folio)) 3065 return seek_data ? start : end; 3066 if (!ops->is_partially_uptodate) 3067 return seek_data ? 
end : start; 3068 3069 xas_pause(xas); 3070 rcu_read_unlock(); 3071 folio_lock(folio); 3072 if (unlikely(folio->mapping != mapping)) 3073 goto unlock; 3074 3075 offset = offset_in_folio(folio, start) & ~(bsz - 1); 3076 3077 do { 3078 if (ops->is_partially_uptodate(folio, offset, bsz) == 3079 seek_data) 3080 break; 3081 start = (start + bsz) & ~((u64)bsz - 1); 3082 offset += bsz; 3083 } while (offset < folio_size(folio)); 3084 unlock: 3085 folio_unlock(folio); 3086 rcu_read_lock(); 3087 return start; 3088 } 3089 3090 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) 3091 { 3092 if (xa_is_value(folio)) 3093 return PAGE_SIZE << xas_get_order(xas); 3094 return folio_size(folio); 3095 } 3096 3097 /** 3098 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache. 3099 * @mapping: Address space to search. 3100 * @start: First byte to consider. 3101 * @end: Limit of search (exclusive). 3102 * @whence: Either SEEK_HOLE or SEEK_DATA. 3103 * 3104 * If the page cache knows which blocks contain holes and which blocks 3105 * contain data, your filesystem can use this function to implement 3106 * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are 3107 * entirely memory-based such as tmpfs, and filesystems which support 3108 * unwritten extents. 3109 * 3110 * Return: The requested offset on success, or -ENXIO if @whence specifies 3111 * SEEK_DATA and there is no data after @start. There is an implicit hole 3112 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start 3113 * and @end contain data. 3114 */ 3115 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, 3116 loff_t end, int whence) 3117 { 3118 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); 3119 pgoff_t max = (end - 1) >> PAGE_SHIFT; 3120 bool seek_data = (whence == SEEK_DATA); 3121 struct folio *folio; 3122 3123 if (end <= start) 3124 return -ENXIO; 3125 3126 rcu_read_lock(); 3127 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { 3128 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; 3129 size_t seek_size; 3130 3131 if (start < pos) { 3132 if (!seek_data) 3133 goto unlock; 3134 start = pos; 3135 } 3136 3137 seek_size = seek_folio_size(&xas, folio); 3138 pos = round_up((u64)pos + 1, seek_size); 3139 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, 3140 seek_data); 3141 if (start < pos) 3142 goto unlock; 3143 if (start >= end) 3144 break; 3145 if (seek_size > PAGE_SIZE) 3146 xas_set(&xas, pos >> PAGE_SHIFT); 3147 if (!xa_is_value(folio)) 3148 folio_put(folio); 3149 } 3150 if (seek_data) 3151 start = -ENXIO; 3152 unlock: 3153 rcu_read_unlock(); 3154 if (folio && !xa_is_value(folio)) 3155 folio_put(folio); 3156 if (start > end) 3157 return end; 3158 return start; 3159 } 3160 3161 #ifdef CONFIG_MMU 3162 #define MMAP_LOTSAMISS (100) 3163 /* 3164 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock 3165 * @vmf - the vm_fault for this fault. 3166 * @folio - the folio to lock. 3167 * @fpin - the pointer to the file we may pin (or is already pinned). 3168 * 3169 * This works similar to lock_folio_or_retry in that it can drop the 3170 * mmap_lock. It differs in that it actually returns the folio locked 3171 * if it returns 1 and 0 if it couldn't lock the folio. If we did have 3172 * to drop the mmap_lock then fpin will point to the pinned file and 3173 * needs to be fput()'ed at a later point. 
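 *
 * filemap_fault() is currently the only caller: on a 0 return it takes
 * its out_retry path, which drops the folio reference, fput()s @fpin if
 * it was set and returns VM_FAULT_RETRY.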
3174 */ 3175 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, 3176 struct file **fpin) 3177 { 3178 if (folio_trylock(folio)) 3179 return 1; 3180 3181 /* 3182 * NOTE! This will make us return with VM_FAULT_RETRY, but with 3183 * the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT 3184 * is supposed to work. We have way too many special cases.. 3185 */ 3186 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 3187 return 0; 3188 3189 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); 3190 if (vmf->flags & FAULT_FLAG_KILLABLE) { 3191 if (__folio_lock_killable(folio)) { 3192 /* 3193 * We didn't have the right flags to drop the 3194 * fault lock, but all fault_handlers only check 3195 * for fatal signals if we return VM_FAULT_RETRY, 3196 * so we need to drop the fault lock here and 3197 * return 0 if we don't have a fpin. 3198 */ 3199 if (*fpin == NULL) 3200 release_fault_lock(vmf); 3201 return 0; 3202 } 3203 } else 3204 __folio_lock(folio); 3205 3206 return 1; 3207 } 3208 3209 /* 3210 * Synchronous readahead happens when we don't even find a page in the page 3211 * cache at all. We don't want to perform IO under the mmap sem, so if we have 3212 * to drop the mmap sem we return the file that was pinned in order for us to do 3213 * that. If we didn't pin a file then we return NULL. The file that is 3214 * returned needs to be fput()'ed when we're done with it. 3215 */ 3216 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) 3217 { 3218 struct file *file = vmf->vma->vm_file; 3219 struct file_ra_state *ra = &file->f_ra; 3220 struct address_space *mapping = file->f_mapping; 3221 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); 3222 struct file *fpin = NULL; 3223 vm_flags_t vm_flags = vmf->vma->vm_flags; 3224 unsigned short mmap_miss; 3225 3226 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3227 /* Use the readahead code, even if readahead is disabled */ 3228 if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) { 3229 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3230 ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); 3231 ra->size = HPAGE_PMD_NR; 3232 /* 3233 * Fetch two PMD folios, so we get the chance to actually 3234 * readahead, unless we've been told not to. 3235 */ 3236 if (!(vm_flags & VM_RAND_READ)) 3237 ra->size *= 2; 3238 ra->async_size = HPAGE_PMD_NR; 3239 ra->order = HPAGE_PMD_ORDER; 3240 page_cache_ra_order(&ractl, ra); 3241 return fpin; 3242 } 3243 #endif 3244 3245 /* 3246 * If we don't want any read-ahead, don't bother. VM_EXEC case below is 3247 * already intended for random access. 3248 */ 3249 if ((vm_flags & (VM_RAND_READ | VM_EXEC)) == VM_RAND_READ) 3250 return fpin; 3251 if (!ra->ra_pages) 3252 return fpin; 3253 3254 if (vm_flags & VM_SEQ_READ) { 3255 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3256 page_cache_sync_ra(&ractl, ra->ra_pages); 3257 return fpin; 3258 } 3259 3260 /* Avoid banging the cache line if not needed */ 3261 mmap_miss = READ_ONCE(ra->mmap_miss); 3262 if (mmap_miss < MMAP_LOTSAMISS * 10) 3263 WRITE_ONCE(ra->mmap_miss, ++mmap_miss); 3264 3265 /* 3266 * Do we miss much more than hit in this file? If so, 3267 * stop bothering with read-ahead. It will only hurt. 3268 */ 3269 if (mmap_miss > MMAP_LOTSAMISS) 3270 return fpin; 3271 3272 if (vm_flags & VM_EXEC) { 3273 /* 3274 * Allow arch to request a preferred minimum folio order for 3275 * executable memory. This can often be beneficial to 3276 * performance if (e.g.) arm64 can contpte-map the folio. 
3277 * Executable memory rarely benefits from readahead, due to its 3278 * random access nature, so set async_size to 0. 3279 * 3280 * Limit to the boundaries of the VMA to avoid reading in any 3281 * pad that might exist between sections, which would be a waste 3282 * of memory. 3283 */ 3284 struct vm_area_struct *vma = vmf->vma; 3285 unsigned long start = vma->vm_pgoff; 3286 unsigned long end = start + vma_pages(vma); 3287 unsigned long ra_end; 3288 3289 ra->order = exec_folio_order(); 3290 ra->start = round_down(vmf->pgoff, 1UL << ra->order); 3291 ra->start = max(ra->start, start); 3292 ra_end = round_up(ra->start + ra->ra_pages, 1UL << ra->order); 3293 ra_end = min(ra_end, end); 3294 ra->size = ra_end - ra->start; 3295 ra->async_size = 0; 3296 } else { 3297 /* 3298 * mmap read-around 3299 */ 3300 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); 3301 ra->size = ra->ra_pages; 3302 ra->async_size = ra->ra_pages / 4; 3303 ra->order = 0; 3304 } 3305 3306 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3307 ractl._index = ra->start; 3308 page_cache_ra_order(&ractl, ra); 3309 return fpin; 3310 } 3311 3312 /* 3313 * Asynchronous readahead happens when we find the page and PG_readahead, 3314 * so we want to possibly extend the readahead further. We return the file that 3315 * was pinned if we have to drop the mmap_lock in order to do IO. 3316 */ 3317 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, 3318 struct folio *folio) 3319 { 3320 struct file *file = vmf->vma->vm_file; 3321 struct file_ra_state *ra = &file->f_ra; 3322 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); 3323 struct file *fpin = NULL; 3324 unsigned short mmap_miss; 3325 3326 /* If we don't want any read-ahead, don't bother */ 3327 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) 3328 return fpin; 3329 3330 /* 3331 * If the folio is locked, we're likely racing against another fault. 3332 * Don't touch the mmap_miss counter to avoid decreasing it multiple 3333 * times for a single folio and break the balance with mmap_miss 3334 * increase in do_sync_mmap_readahead(). 3335 */ 3336 if (likely(!folio_test_locked(folio))) { 3337 mmap_miss = READ_ONCE(ra->mmap_miss); 3338 if (mmap_miss) 3339 WRITE_ONCE(ra->mmap_miss, --mmap_miss); 3340 } 3341 3342 if (folio_test_readahead(folio)) { 3343 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3344 page_cache_async_ra(&ractl, folio, ra->ra_pages); 3345 } 3346 return fpin; 3347 } 3348 3349 static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf) 3350 { 3351 struct vm_area_struct *vma = vmf->vma; 3352 vm_fault_t ret = 0; 3353 pte_t *ptep; 3354 3355 /* 3356 * We might have COW'ed a pagecache folio and might now have an mlocked 3357 * anon folio mapped. The original pagecache folio is not mlocked and 3358 * might have been evicted. During a read+clear/modify/write update of 3359 * the PTE, such as done in do_numa_page()/change_pte_range(), we 3360 * temporarily clear the PTE under PT lock and might detect it here as 3361 * "none" when not holding the PT lock. 3362 * 3363 * Not rechecking the PTE under PT lock could result in an unexpected 3364 * major fault in an mlock'ed region. Recheck only for this special 3365 * scenario while holding the PT lock, to not degrade non-mlocked 3366 * scenarios. Recheck the PTE without PT lock firstly, thereby reducing 3367 * the number of times we hold PT lock. 
3368 */ 3369 if (!(vma->vm_flags & VM_LOCKED)) 3370 return 0; 3371 3372 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) 3373 return 0; 3374 3375 ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address, 3376 &vmf->ptl); 3377 if (unlikely(!ptep)) 3378 return VM_FAULT_NOPAGE; 3379 3380 if (unlikely(!pte_none(ptep_get_lockless(ptep)))) { 3381 ret = VM_FAULT_NOPAGE; 3382 } else { 3383 spin_lock(vmf->ptl); 3384 if (unlikely(!pte_none(ptep_get(ptep)))) 3385 ret = VM_FAULT_NOPAGE; 3386 spin_unlock(vmf->ptl); 3387 } 3388 pte_unmap(ptep); 3389 return ret; 3390 } 3391 3392 /** 3393 * filemap_fault - read in file data for page fault handling 3394 * @vmf: struct vm_fault containing details of the fault 3395 * 3396 * filemap_fault() is invoked via the vma operations vector for a 3397 * mapped memory region to read in file data during a page fault. 3398 * 3399 * The goto's are kind of ugly, but this streamlines the normal case of having 3400 * it in the page cache, and handles the special cases reasonably without 3401 * having a lot of duplicated code. 3402 * 3403 * vma->vm_mm->mmap_lock must be held on entry. 3404 * 3405 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock 3406 * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap(). 3407 * 3408 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock 3409 * has not been released. 3410 * 3411 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 3412 * 3413 * Return: bitwise-OR of %VM_FAULT_ codes. 3414 */ 3415 vm_fault_t filemap_fault(struct vm_fault *vmf) 3416 { 3417 int error; 3418 struct file *file = vmf->vma->vm_file; 3419 struct file *fpin = NULL; 3420 struct address_space *mapping = file->f_mapping; 3421 struct inode *inode = mapping->host; 3422 pgoff_t max_idx, index = vmf->pgoff; 3423 struct folio *folio; 3424 vm_fault_t ret = 0; 3425 bool mapping_locked = false; 3426 3427 max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3428 if (unlikely(index >= max_idx)) 3429 return VM_FAULT_SIGBUS; 3430 3431 trace_mm_filemap_fault(mapping, index); 3432 3433 /* 3434 * Do we have something in the page cache already? 3435 */ 3436 folio = filemap_get_folio(mapping, index); 3437 if (likely(!IS_ERR(folio))) { 3438 /* 3439 * We found the page, so try async readahead before waiting for 3440 * the lock. 3441 */ 3442 if (!(vmf->flags & FAULT_FLAG_TRIED)) 3443 fpin = do_async_mmap_readahead(vmf, folio); 3444 if (unlikely(!folio_test_uptodate(folio))) { 3445 filemap_invalidate_lock_shared(mapping); 3446 mapping_locked = true; 3447 } 3448 } else { 3449 ret = filemap_fault_recheck_pte_none(vmf); 3450 if (unlikely(ret)) 3451 return ret; 3452 3453 /* No page in the page cache at all */ 3454 count_vm_event(PGMAJFAULT); 3455 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 3456 ret = VM_FAULT_MAJOR; 3457 fpin = do_sync_mmap_readahead(vmf); 3458 retry_find: 3459 /* 3460 * See comment in filemap_create_folio() why we need 3461 * invalidate_lock 3462 */ 3463 if (!mapping_locked) { 3464 filemap_invalidate_lock_shared(mapping); 3465 mapping_locked = true; 3466 } 3467 folio = __filemap_get_folio(mapping, index, 3468 FGP_CREAT|FGP_FOR_MMAP, 3469 vmf->gfp_mask); 3470 if (IS_ERR(folio)) { 3471 if (fpin) 3472 goto out_retry; 3473 filemap_invalidate_unlock_shared(mapping); 3474 return VM_FAULT_OOM; 3475 } 3476 } 3477 3478 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) 3479 goto out_retry; 3480 3481 /* Did it get truncated? 
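 * Same race as in __filemap_get_folio(): the folio may have been
 * removed from the page cache while we slept on its lock, in which
 * case folio->mapping was cleared. Drop it and repeat the lookup.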
*/ 3482 if (unlikely(folio->mapping != mapping)) { 3483 folio_unlock(folio); 3484 folio_put(folio); 3485 goto retry_find; 3486 } 3487 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); 3488 3489 /* 3490 * We have a locked folio in the page cache, now we need to check 3491 * that it's up-to-date. If not, it is going to be due to an error, 3492 * or because readahead was otherwise unable to retrieve it. 3493 */ 3494 if (unlikely(!folio_test_uptodate(folio))) { 3495 /* 3496 * If the invalidate lock is not held, the folio was in cache 3497 * and uptodate and now it is not. Strange but possible since we 3498 * didn't hold the page lock all the time. Let's drop 3499 * everything, get the invalidate lock and try again. 3500 */ 3501 if (!mapping_locked) { 3502 folio_unlock(folio); 3503 folio_put(folio); 3504 goto retry_find; 3505 } 3506 3507 /* 3508 * OK, the folio is really not uptodate. This can be because the 3509 * VMA has the VM_RAND_READ flag set, or because an error 3510 * arose. Let's read it in directly. 3511 */ 3512 goto page_not_uptodate; 3513 } 3514 3515 /* 3516 * We've made it this far and we had to drop our mmap_lock, now is the 3517 * time to return to the upper layer and have it re-find the vma and 3518 * redo the fault. 3519 */ 3520 if (fpin) { 3521 folio_unlock(folio); 3522 goto out_retry; 3523 } 3524 if (mapping_locked) 3525 filemap_invalidate_unlock_shared(mapping); 3526 3527 /* 3528 * Found the page and have a reference on it. 3529 * We must recheck i_size under page lock. 3530 */ 3531 max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3532 if (unlikely(index >= max_idx)) { 3533 folio_unlock(folio); 3534 folio_put(folio); 3535 return VM_FAULT_SIGBUS; 3536 } 3537 3538 vmf->page = folio_file_page(folio, index); 3539 return ret | VM_FAULT_LOCKED; 3540 3541 page_not_uptodate: 3542 /* 3543 * Umm, take care of errors if the page isn't up-to-date. 3544 * Try to re-read it _once_. We do this synchronously, 3545 * because there really aren't any performance issues here 3546 * and we need to check for errors. 3547 */ 3548 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3549 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); 3550 if (fpin) 3551 goto out_retry; 3552 folio_put(folio); 3553 3554 if (!error || error == AOP_TRUNCATED_PAGE) 3555 goto retry_find; 3556 filemap_invalidate_unlock_shared(mapping); 3557 3558 return VM_FAULT_SIGBUS; 3559 3560 out_retry: 3561 /* 3562 * We dropped the mmap_lock, we need to return to the fault handler to 3563 * re-find the vma and come back and find our hopefully still populated 3564 * page. 3565 */ 3566 if (!IS_ERR(folio)) 3567 folio_put(folio); 3568 if (mapping_locked) 3569 filemap_invalidate_unlock_shared(mapping); 3570 if (fpin) 3571 fput(fpin); 3572 return ret | VM_FAULT_RETRY; 3573 } 3574 EXPORT_SYMBOL(filemap_fault); 3575 3576 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, 3577 pgoff_t start) 3578 { 3579 struct mm_struct *mm = vmf->vma->vm_mm; 3580 3581 /* Huge page is mapped? No need to proceed. */ 3582 if (pmd_trans_huge(*vmf->pmd)) { 3583 folio_unlock(folio); 3584 folio_put(folio); 3585 return true; 3586 } 3587 3588 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { 3589 struct page *page = folio_file_page(folio, start); 3590 vm_fault_t ret = do_set_pmd(vmf, folio, page); 3591 if (!ret) { 3592 /* The page is mapped successfully, reference consumed. 
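 * do_set_pmd() has taken over the reference we held on the folio,
 * so only unlock it here; this path must not folio_put() it.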
static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
			    pgoff_t start)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	/* Huge page is mapped? No need to proceed. */
	if (pmd_trans_huge(*vmf->pmd)) {
		folio_unlock(folio);
		folio_put(folio);
		return true;
	}

	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
		struct page *page = folio_file_page(folio, start);
		vm_fault_t ret = do_set_pmd(vmf, folio, page);
		if (!ret) {
			/* The page is mapped successfully, reference consumed. */
			folio_unlock(folio);
			return true;
		}
	}

	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

	return false;
}

static struct folio *next_uptodate_folio(struct xa_state *xas,
		struct address_space *mapping, pgoff_t end_pgoff)
{
	struct folio *folio = xas_next_entry(xas, end_pgoff);
	unsigned long max_idx;

	do {
		if (!folio)
			return NULL;
		if (xas_retry(xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		if (!folio_try_get(folio))
			continue;
		if (folio_test_locked(folio))
			goto skip;
		/* Has the page moved or been split? */
		if (unlikely(folio != xas_reload(xas)))
			goto skip;
		if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
			goto skip;
		if (!folio_trylock(folio))
			goto skip;
		if (folio->mapping != mapping)
			goto unlock;
		if (!folio_test_uptodate(folio))
			goto unlock;
		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (xas->xa_index >= max_idx)
			goto unlock;
		return folio;
unlock:
		folio_unlock(folio);
skip:
		folio_put(folio);
	} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);

	return NULL;
}

/*
 * Map page range [start_page, start_page + nr_pages) of folio.
 * start_page is gotten from start by folio_page(folio, start)
 */
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
			struct folio *folio, unsigned long start,
			unsigned long addr, unsigned int nr_pages,
			unsigned long *rss, unsigned short *mmap_miss)
{
	vm_fault_t ret = 0;
	struct page *page = folio_page(folio, start);
	unsigned int count = 0;
	pte_t *old_ptep = vmf->pte;

	do {
		if (PageHWPoison(page + count))
			goto skip;

		/*
		 * If there are too many folios that are recently evicted
		 * in a file, they will probably continue to be evicted.
		 * In such situation, read-ahead is only a waste of IO.
		 * Don't decrease mmap_miss in this scenario to make sure
		 * we can stop read-ahead.
		 */
		if (!folio_test_workingset(folio))
			(*mmap_miss)++;

		/*
		 * NOTE: If there're PTE markers, we'll leave them to be
		 * handled in the specific fault path, and it'll prohibit the
		 * fault-around logic.
		 */
		if (!pte_none(ptep_get(&vmf->pte[count])))
			goto skip;

		count++;
		continue;
skip:
		if (count) {
			set_pte_range(vmf, folio, page, count, addr);
			*rss += count;
			folio_ref_add(folio, count);
			if (in_range(vmf->address, addr, count * PAGE_SIZE))
				ret = VM_FAULT_NOPAGE;
		}

		count++;
		page += count;
		vmf->pte += count;
		addr += count * PAGE_SIZE;
		count = 0;
	} while (--nr_pages > 0);

	if (count) {
		set_pte_range(vmf, folio, page, count, addr);
		*rss += count;
		folio_ref_add(folio, count);
		if (in_range(vmf->address, addr, count * PAGE_SIZE))
			ret = VM_FAULT_NOPAGE;
	}

	vmf->pte = old_ptep;

	return ret;
}

static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
		struct folio *folio, unsigned long addr,
		unsigned long *rss, unsigned short *mmap_miss)
{
	vm_fault_t ret = 0;
	struct page *page = &folio->page;

	if (PageHWPoison(page))
		return ret;

	/* See comment of filemap_map_folio_range() */
	if (!folio_test_workingset(folio))
		(*mmap_miss)++;

	/*
	 * NOTE: If there're PTE markers, we'll leave them to be
	 * handled in the specific fault path, and it'll prohibit
	 * the fault-around logic.
	 */
	if (!pte_none(ptep_get(vmf->pte)))
		return ret;

	if (vmf->address == addr)
		ret = VM_FAULT_NOPAGE;

	set_pte_range(vmf, folio, page, 1, addr);
	(*rss)++;
	folio_ref_inc(folio);

	return ret;
}

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
			     pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	pgoff_t file_end, last_pgoff = start_pgoff;
	unsigned long addr;
	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	struct folio *folio;
	vm_fault_t ret = 0;
	unsigned long rss = 0;
	unsigned int nr_pages = 0, folio_type;
	unsigned short mmap_miss = 0, mmap_miss_saved;

	rcu_read_lock();
	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
	if (!folio)
		goto out;

	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
	if (!vmf->pte) {
		folio_unlock(folio);
		folio_put(folio);
		goto out;
	}

	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
	if (end_pgoff > file_end)
		end_pgoff = file_end;

	folio_type = mm_counter_file(folio);
	do {
		unsigned long end;

		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;
		end = folio_next_index(folio) - 1;
		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;

		if (!folio_test_large(folio))
			ret |= filemap_map_order0_folio(vmf,
					folio, addr, &rss, &mmap_miss);
		else
			ret |= filemap_map_folio_range(vmf, folio,
					xas.xa_index - folio->index, addr,
					nr_pages, &rss, &mmap_miss);

		folio_unlock(folio);
		folio_put(folio);
	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
	add_mm_counter(vma->vm_mm, folio_type, rss);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
out:
	rcu_read_unlock();

	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
	if (mmap_miss >= mmap_miss_saved)
		WRITE_ONCE(file->f_ra.mmap_miss, 0);
	else
		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);

	return ret;
}
EXPORT_SYMBOL(filemap_map_pages);

vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct folio *folio = page_folio(vmf->page);
	vm_fault_t ret = VM_FAULT_LOCKED;

	sb_start_pagefault(mapping->host->i_sb);
	file_update_time(vmf->vma->vm_file);
	folio_lock(folio);
	if (folio->mapping != mapping) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the folio dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty folio and writeprotect it again.
	 */
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
out:
	sb_end_pagefault(mapping->host->i_sb);
	return ret;
}

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

int generic_file_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	file_accessed(file);
	desc->vm_ops = &generic_file_vm_ops;
	return 0;
}
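/*
 * Illustrative sketch only (not kernel code): a filesystem whose data lives
 * entirely in the page cache can use the generic mmap path by pointing its
 * file_operations at generic_file_mmap() (or ->mmap_prepare at
 * generic_file_mmap_prepare()), which installs generic_file_vm_ops above.
 * The struct name is hypothetical; the read/write/seek helpers named are the
 * usual generic ones and are assumed to be appropriate for the filesystem.
 *
 *	static const struct file_operations example_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.splice_read	= filemap_splice_read,
 *	};
 */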
/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma_is_shared_maywrite(vma))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}

int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc)
{
	if (is_shared_maywrite(desc->vm_flags))
		return -EINVAL;
	return generic_file_mmap_prepare(desc);
}
#else
vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_mmap_prepare(struct vm_area_desc *desc)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(filemap_page_mkwrite);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_mmap_prepare);
EXPORT_SYMBOL(generic_file_readonly_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap_prepare);
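/*
 * Illustrative sketch only: filesystems that cannot write data back through
 * a shared mapping (typically read-only media) use the _readonly_ variants
 * above so that shared writable mmap attempts fail with -EINVAL. The struct
 * name below is hypothetical.
 *
 *	static const struct file_operations example_ro_file_operations = {
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_readonly_mmap,
 *	};
 */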
static struct folio *do_read_cache_folio(struct address_space *mapping,
		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
{
	struct folio *folio;
	int err;

	if (!filler)
		filler = mapping->a_ops->read_folio;
repeat:
	folio = filemap_get_folio(mapping, index);
	if (IS_ERR(folio)) {
		folio = filemap_alloc_folio(gfp,
					    mapping_min_folio_order(mapping));
		if (!folio)
			return ERR_PTR(-ENOMEM);
		index = mapping_align_index(mapping, index);
		err = filemap_add_folio(mapping, folio, index, gfp);
		if (unlikely(err)) {
			folio_put(folio);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for xarray node */
			return ERR_PTR(err);
		}

		goto filler;
	}
	if (folio_test_uptodate(folio))
		goto out;

	if (!folio_trylock(folio)) {
		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
		goto repeat;
	}

	/* Folio was truncated from mapping */
	if (!folio->mapping) {
		folio_unlock(folio);
		folio_put(folio);
		goto repeat;
	}

	/* Someone else locked and filled the page in a very small window */
	if (folio_test_uptodate(folio)) {
		folio_unlock(folio);
		goto out;
	}

filler:
	err = filemap_read_folio(file, filler, folio);
	if (err) {
		folio_put(folio);
		if (err == AOP_TRUNCATED_PAGE)
			goto repeat;
		return ERR_PTR(err);
	}

out:
	folio_mark_accessed(folio);
	return folio;
}

/**
 * read_cache_folio - Read into page cache, fill it if needed.
 * @mapping: The address_space to read from.
 * @index: The index to read.
 * @filler: Function to perform the read, or NULL to use aops->read_folio().
 * @file: Passed to filler function, may be NULL if not required.
 *
 * Read one page into the page cache. If it succeeds, the folio returned
 * will contain @index, but it may not be the first page of the folio.
 *
 * If the filler function returns an error, it will be returned to the
 * caller.
 *
 * Context: May sleep. Expects mapping->invalidate_lock to be held.
 * Return: An uptodate folio on success, ERR_PTR() on failure.
 */
struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
		filler_t filler, struct file *file)
{
	return do_read_cache_folio(mapping, index, filler, file,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_folio);

/**
 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
 * @mapping: The address_space for the folio.
 * @index: The index that the allocated folio will contain.
 * @gfp: The page allocator flags to use if allocating.
 *
 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
 * any new memory allocations done using the specified allocation flags.
 *
 * The most likely error from this function is EIO, but ENOMEM is
 * possible and so is EINTR. If ->read_folio returns another error,
 * that will be returned to the caller.
 *
 * The function expects mapping->invalidate_lock to be already held.
 *
 * Return: Uptodate folio on success, ERR_PTR() on failure.
 */
struct folio *mapping_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
}
EXPORT_SYMBOL(mapping_read_folio_gfp);

static struct page *do_read_cache_page(struct address_space *mapping,
		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
{
	struct folio *folio;

	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
	if (IS_ERR(folio))
		return &folio->page;
	return folio_file_page(folio, index);
}

struct page *read_cache_page(struct address_space *mapping,
		pgoff_t index, filler_t *filler, struct file *file)
{
	return do_read_cache_page(mapping, index, filler, file,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 *
 * The function expects mapping->invalidate_lock to be already held.
 *
 * Return: up to date page on success, ERR_PTR() on failure.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp)
{
	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
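/*
 * Illustrative sketch only: a typical caller treats the return value of
 * read_cache_folio() as "uptodate folio or ERR_PTR()" and drops its
 * reference with folio_put() when finished. The mapping/index/pos variables
 * below are placeholders for whatever the caller already has.
 *
 *	struct folio *folio;
 *	void *kaddr;
 *
 *	folio = read_cache_folio(mapping, index, NULL, NULL);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *
 *	kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
 *	... copy or inspect the data ...
 *	kunmap_local(kaddr);
 *	folio_put(folio);
 */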
/*
 * Warn about a page cache invalidation failure during a direct I/O write.
 */
static void dio_warn_stale_pagecache(struct file *filp)
{
	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
	char pathname[128];
	char *path;

	errseq_set(&filp->f_mapping->wb_err, -EIO);
	if (__ratelimit(&_rs)) {
		path = file_path(filp, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
			current->comm);
	}
}

void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;

	if (mapping->nrpages &&
	    invalidate_inode_pages2_range(mapping,
			iocb->ki_pos >> PAGE_SHIFT,
			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
		dio_warn_stale_pagecache(iocb->ki_filp);
}

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	size_t write_len = iov_iter_count(from);
	ssize_t written;

	/*
	 * If a page can not be invalidated, return 0 to fall back
	 * to buffered write.
	 */
	written = kiocb_invalidate_pages(iocb, write_len);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = mapping->a_ops->direct_IO(iocb, from);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing. Either one is a pretty crazy thing to do,
	 * so we don't support it 100%. If this invalidation
	 * fails, tough, the write still worked...
	 *
	 * Most of the time we do not need this since dio_complete() will do
	 * the invalidation for us. However there are some file systems that
	 * do not end up with dio_complete() being called, so let's not break
	 * them by removing it completely.
	 *
	 * Noticeable example is a blkdev_direct_IO().
	 *
	 * Skip invalidation for async writes or if mapping has no pages.
	 */
	if (written > 0) {
		struct inode *inode = mapping->host;
		loff_t pos = iocb->ki_pos;

		kiocb_invalidate_post_direct_write(iocb, written);
		pos += written;
		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, write_len - iov_iter_count(from));
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	size_t chunk = mapping_max_folio_size(mapping);
	long status = 0;
	ssize_t written = 0;

	do {
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		void *fsdata = NULL;

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		balance_dirty_pages_ratelimited(mapping);

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(iocb, mapping, pos, bytes,
					    &folio, &fsdata);
		if (unlikely(status < 0))
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		/*
		 * Faults here on mmap()s can recurse into arbitrary
		 * filesystem code. Lots of locks are held that can
		 * deadlock. Use an atomic copy to avoid deadlocking
		 * in page fault handling.
		 */
		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		flush_dcache_folio(folio);

		status = a_ops->write_end(iocb, mapping, pos, bytes, copied,
					  folio, fsdata);
		if (unlikely(status != copied)) {
			iov_iter_revert(i, copied - max(status, 0L));
			if (unlikely(status < 0))
				break;
		}
		cond_resched();

		if (unlikely(status == 0)) {
			/*
			 * A short copy made ->write_end() reject the
			 * thing entirely. Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}

			/*
			 * 'folio' is now unlocked and faults on it can be
			 * handled. Ensure forward progress by trying to
			 * fault it in now.
			 */
			if (fault_in_iov_iter_readable(i, bytes) == bytes) {
				status = -EFAULT;
				break;
			}
		} else {
			pos += status;
			written += status;
		}
	} while (iov_iter_count(i));

	if (!written)
		return status;
	iocb->ki_pos += written;
	return written;
}
EXPORT_SYMBOL(generic_perform_write);

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_rwsem to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes. Some filesystems do this for writes to
		 * holes, for example. For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
			return ret;
		return direct_write_fallback(iocb, from, ret,
				generic_perform_write(iocb, from));
	}

	return generic_perform_write(iocb, from);
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_rwsem as needed.
 * Return:
 * * negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * number of bytes written, even for truncated writes
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
/**
 * filemap_release_folio() - Release fs-specific metadata on a folio.
 * @folio: The folio which the kernel is trying to free.
 * @gfp: Memory allocation flags (and I/O mode).
 *
 * The address_space is trying to release any data attached to a folio
 * (presumably at folio->private).
 *
 * This will also be called if the private_2 flag is set on a page,
 * indicating that the folio has other metadata associated with it.
 *
 * The @gfp argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block
 * (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %true if the release was successful, otherwise %false.
 */
bool filemap_release_folio(struct folio *folio, gfp_t gfp)
{
	struct address_space * const mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	if (!folio_needs_release(folio))
		return true;
	if (folio_test_writeback(folio))
		return false;

	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);
	return try_to_free_buffers(folio);
}
EXPORT_SYMBOL(filemap_release_folio);

/**
 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
 * @inode: The inode to flush
 * @flush: Set to write back rather than simply invalidate.
 * @start: First byte in the range.
 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
 *       onwards.
 *
 * Invalidate all the folios on an inode that contribute to the specified
 * range, possibly writing them back first. Whilst the operation is
 * undertaken, the invalidate lock is held to prevent new folios from being
 * installed.
 */
int filemap_invalidate_inode(struct inode *inode, bool flush,
			     loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t first = start >> PAGE_SHIFT;
	pgoff_t last = end >> PAGE_SHIFT;
	pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;

	if (!mapping || !mapping->nrpages || end < start)
		goto out;

	/* Prevent new folios from being added to the inode. */
	filemap_invalidate_lock(mapping);

	if (!mapping->nrpages)
		goto unlock;

	unmap_mapping_pages(mapping, first, nr, false);

	/* Write back the data if we're asked to. */
	if (flush) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_ALL,
			.nr_to_write	= LONG_MAX,
			.range_start	= start,
			.range_end	= end,
		};

		filemap_fdatawrite_wbc(mapping, &wbc);
	}

	/* Wait for writeback to complete on all folios and discard. */
	invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);

unlock:
	filemap_invalidate_unlock(mapping);
out:
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL_GPL(filemap_invalidate_inode);
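/*
 * Illustrative sketch only: a network or caching filesystem that learns its
 * cached data is stale (for example from a third-party change notification)
 * might flush and drop everything it holds for the inode roughly like this;
 * passing flush=true writes local modifications back before invalidating.
 * The surrounding context is hypothetical.
 *
 *	int err = filemap_invalidate_inode(inode, true, 0, LLONG_MAX);
 *	if (err)
 *		... report or propagate the writeback error ...
 */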
#ifdef CONFIG_CACHESTAT_SYSCALL
/**
 * filemap_cachestat() - compute the page cache statistics of a mapping
 * @mapping: The mapping to compute the statistics for.
 * @first_index: The starting page cache index.
 * @last_index: The final page index (inclusive).
 * @cs: the cachestat struct to write the result to.
 *
 * This will query the page cache statistics of a mapping in the
 * page range of [first_index, last_index] (inclusive). The statistics
 * queried include: number of dirty pages, number of pages marked for
 * writeback, and the number of (recently) evicted pages.
 */
static void filemap_cachestat(struct address_space *mapping,
		pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
{
	XA_STATE(xas, &mapping->i_pages, first_index);
	struct folio *folio;

	/* Flush stats (and potentially sleep) outside the RCU read section. */
	mem_cgroup_flush_stats_ratelimited(NULL);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_index) {
		int order;
		unsigned long nr_pages;
		pgoff_t folio_first_index, folio_last_index;

		/*
		 * Don't deref the folio. It is not pinned, and might
		 * get freed (and reused) underneath us.
		 *
		 * We *could* pin it, but that would be expensive for
		 * what should be a fast and lightweight syscall.
		 *
		 * Instead, derive all information of interest from
		 * the rcu-protected xarray.
		 */

		if (xas_retry(&xas, folio))
			continue;

		order = xas_get_order(&xas);
		nr_pages = 1 << order;
		folio_first_index = round_down(xas.xa_index, 1 << order);
		folio_last_index = folio_first_index + nr_pages - 1;

		/* Folios might straddle the range boundaries, only count covered pages */
		if (folio_first_index < first_index)
			nr_pages -= first_index - folio_first_index;

		if (folio_last_index > last_index)
			nr_pages -= folio_last_index - last_index;

		if (xa_is_value(folio)) {
			/* page is evicted */
			void *shadow = (void *)folio;
			bool workingset; /* not used */

			cs->nr_evicted += nr_pages;

#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
			if (shmem_mapping(mapping)) {
				/* shmem file - in swap cache */
				swp_entry_t swp = radix_to_swp_entry(folio);

				/* swapin error results in poisoned entry */
				if (non_swap_entry(swp))
					goto resched;

				/*
				 * Getting a swap entry from the shmem
				 * inode means we beat
				 * shmem_unuse(). rcu_read_lock()
				 * ensures swapoff waits for us before
				 * freeing the swapper space. However,
				 * we can race with swapping and
				 * invalidation, so there might not be
				 * a shadow in the swapcache (yet).
				 */
				shadow = get_shadow_from_swap_cache(swp);
				if (!shadow)
					goto resched;
			}
#endif
			if (workingset_test_recent(shadow, true, &workingset, false))
				cs->nr_recently_evicted += nr_pages;

			goto resched;
		}

		/* page is in cache */
		cs->nr_cache += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
			cs->nr_dirty += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
			cs->nr_writeback += nr_pages;

resched:
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
}

/*
 * See mincore: reveal pagecache information only for files
 * that the calling process has write access to, or could (if
 * tried) open for writing.
 */
static inline bool can_do_cachestat(struct file *f)
{
	if (f->f_mode & FMODE_WRITE)
		return true;
	if (inode_owner_or_capable(file_mnt_idmap(f), file_inode(f)))
		return true;
	return file_permission(f, MAY_WRITE) == 0;
}

/*
 * The cachestat(2) system call.
 *
 * cachestat() returns the page cache statistics of a file in the
 * bytes range specified by `off` and `len`: number of cached pages,
 * number of dirty pages, number of pages marked for writeback,
 * number of evicted pages, and number of recently evicted pages.
 *
 * An evicted page is a page that is previously in the page cache
 * but has been evicted since. A page is recently evicted if its last
 * eviction was recent enough that its reentry to the cache would
 * indicate that it is actively being used by the system, and that
 * there is memory pressure on the system.
 *
 * `off` and `len` must be non-negative integers. If `len` > 0,
 * the queried range is [`off`, `off` + `len`]. If `len` == 0,
 * we will query in the range from `off` to the end of the file.
 *
 * The `flags` argument is unused for now, but is included for future
 * extensibility. Users should pass 0 (i.e., no flag specified).
 *
 * Currently, hugetlbfs is not supported.
 *
 * Because the status of a page can change after cachestat() checks it
 * but before it returns to the application, the returned values may
 * contain stale information.
 *
 * return values:
 *  zero        - success
 *  -EFAULT     - cstat or cstat_range points to an illegal address
 *  -EINVAL     - invalid flags
 *  -EBADF      - invalid file descriptor
 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
 */
SYSCALL_DEFINE4(cachestat, unsigned int, fd,
		struct cachestat_range __user *, cstat_range,
		struct cachestat __user *, cstat, unsigned int, flags)
{
	CLASS(fd, f)(fd);
	struct address_space *mapping;
	struct cachestat_range csr;
	struct cachestat cs;
	pgoff_t first_index, last_index;

	if (fd_empty(f))
		return -EBADF;

	if (copy_from_user(&csr, cstat_range,
			sizeof(struct cachestat_range)))
		return -EFAULT;

	/* hugetlbfs is not supported */
	if (is_file_hugepages(fd_file(f)))
		return -EOPNOTSUPP;

	if (!can_do_cachestat(fd_file(f)))
		return -EPERM;

	if (flags != 0)
		return -EINVAL;

	first_index = csr.off >> PAGE_SHIFT;
	last_index =
		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
	memset(&cs, 0, sizeof(struct cachestat));
	mapping = fd_file(f)->f_mapping;
	filemap_cachestat(mapping, first_index, last_index, &cs);

	if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_CACHESTAT_SYSCALL */
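/*
 * Illustrative userspace sketch only (not kernel code): querying the page
 * cache statistics of a whole open file with the syscall defined above.
 * Error handling is elided, and the C library may not provide a wrapper,
 * hence the raw syscall(2) with __NR_cachestat; the structures come from
 * the uapi header <linux/mman.h>.
 *
 *	#include <linux/mman.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct cachestat_range range = { .off = 0, .len = 0 };
 *	struct cachestat cs;
 *
 *	if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
 *		printf("cached %llu dirty %llu writeback %llu\n",
 *		       (unsigned long long)cs.nr_cache,
 *		       (unsigned long long)cs.nr_dirty,
 *		       (unsigned long long)cs.nr_writeback);
 */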