Lines matching "wait", "retry", "us" in mm/filemap.c

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1994-1999 Linus Torvalds
30 #include <linux/error-injection.h>
33 #include <linux/backing-dev.h>
72 * finished 'unifying' the page and buffer cache and SMP-threaded the
73 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
75 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
81 * ->i_mmap_rwsem (truncate_pagecache)
82 * ->private_lock (__free_pte->block_dirty_folio)
83 * ->swap_lock (exclusive_swap_page, others)
84 * ->i_pages lock
86 * ->i_rwsem
87 * ->invalidate_lock (acquired by fs in truncate path)
88 * ->i_mmap_rwsem (truncate->unmap_mapping_range)
90 * ->mmap_lock
91 * ->i_mmap_rwsem
92 * ->page_table_lock or pte_lock (various, mainly in memory.c)
93 * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
95 * ->mmap_lock
96 * ->invalidate_lock (filemap_fault)
97 * ->lock_page (filemap_fault, access_process_vm)
99 * ->i_rwsem (generic_perform_write)
100 * ->mmap_lock (fault_in_readable->do_page_fault)
102 * bdi->wb.list_lock
103 * sb_lock (fs/fs-writeback.c)
104 * ->i_pages lock (__sync_single_inode)
106 * ->i_mmap_rwsem
107 * ->anon_vma.lock (vma_merge)
109 * ->anon_vma.lock
110 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
112 * ->page_table_lock or pte_lock
113 * ->swap_lock (try_to_unmap_one)
114 * ->private_lock (try_to_unmap_one)
115 * ->i_pages lock (try_to_unmap_one)
116 * ->lruvec->lru_lock (follow_page_mask->mark_page_accessed)
117 * ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
118 * ->private_lock (folio_remove_rmap_pte->set_page_dirty)
119 * ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
120 * bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
121 * ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty)
122 * ->memcg->move_lock (folio_remove_rmap_pte->folio_memcg_lock)
123 * bdi.wb->list_lock (zap_pte_range->set_page_dirty)
124 * ->inode->i_lock (zap_pte_range->set_page_dirty)
125 * ->private_lock (zap_pte_range->block_dirty_folio)
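
The table above reads as a lock-acquisition recipe; a minimal sketch of a filesystem truncate path that follows it, using the real helper names but an assumed function (nothing here is taken from this file):

static void example_truncate(struct inode *inode, loff_t newsize)
{
	/* ->i_rwsem first, then ->invalidate_lock, per the ordering above */
	inode_lock(inode);
	filemap_invalidate_lock(inode->i_mapping);
	truncate_setsize(inode, newsize);	/* ->i_mmap_rwsem and the
						 * ->i_pages lock are taken
						 * further down this chain */
	filemap_invalidate_unlock(inode->i_mapping);
	inode_unlock(inode);
}
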
140 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
145 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
153 folio->mapping = NULL; in page_cache_delete()
154 /* Leave page->index set: truncation lookup relies upon it */ in page_cache_delete()
155 mapping->nrpages -= nr; in page_cache_delete()
166 current->comm, folio_pfn(folio)); in filemap_unaccount_folio()
167 dump_page(&folio->page, "still mapped when deleted"); in filemap_unaccount_folio()
181 atomic_set(&folio->_mapcount, -1); in filemap_unaccount_folio()
193 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in filemap_unaccount_folio()
195 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in filemap_unaccount_folio()
197 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); in filemap_unaccount_folio()
199 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); in filemap_unaccount_folio()
206 * unwritten data - on ordinary filesystems. in filemap_unaccount_folio()
208 * But it's harmless on in-memory filesystems like tmpfs; and can in filemap_unaccount_folio()
219 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
224 * sure the page is locked and that nobody else uses it - or that usage
229 struct address_space *mapping = folio->mapping; in __filemap_remove_folio()
241 free_folio = mapping->a_ops->free_folio; in filemap_free_folio()
251 * filemap_remove_folio - Remove folio from page cache.
260 struct address_space *mapping = folio->mapping; in filemap_remove_folio()
263 spin_lock(&mapping->host->i_lock); in filemap_remove_folio()
264 xa_lock_irq(&mapping->i_pages); in filemap_remove_folio()
266 xa_unlock_irq(&mapping->i_pages); in filemap_remove_folio()
268 inode_add_lru(mapping->host); in filemap_remove_folio()
269 spin_unlock(&mapping->host->i_lock); in filemap_remove_folio()
275 * page_cache_delete_batch - delete several folios from page cache
279 * The function walks over mapping->i_pages and removes folios passed in
290 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
310 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
311 VM_BUG_ON_FOLIO(folio->index > in page_cache_delete_batch()
312 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
318 folio->mapping = NULL; in page_cache_delete_batch()
319 /* Leave folio->index set: truncation lookup relies on it */ in page_cache_delete_batch()
325 mapping->nrpages -= total_pages; in page_cache_delete_batch()
336 spin_lock(&mapping->host->i_lock); in delete_from_page_cache_batch()
337 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
339 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch()
345 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
347 inode_add_lru(mapping->host); in delete_from_page_cache_batch()
348 spin_unlock(&mapping->host->i_lock); in delete_from_page_cache_batch()
351 filemap_free_folio(mapping, fbatch->folios[i]); in delete_from_page_cache_batch()
358 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
359 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
360 ret = -ENOSPC; in filemap_check_errors()
361 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
362 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
363 ret = -EIO; in filemap_check_errors()
371 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
372 return -EIO; in filemap_check_and_keep_errors()
373 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
374 return -ENOSPC; in filemap_check_and_keep_errors()
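
Both helpers consume the legacy AS_EIO/AS_ENOSPC flags. A hedged sketch of how they surface to callers, assuming a filesystem whose ->fsync has no private metadata of its own to flush:

static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	/* writes the range back, waits, and returns any recorded error */
	return file_write_and_wait_range(file, start, end);
}
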
379 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
397 wbc_attach_fdatawrite_inode(wbc, mapping->host); in filemap_fdatawrite_wbc()
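
For reference, a sketch of the writeback_control a caller hands to filemap_fdatawrite_wbc(); the field values mirror what __filemap_fdatawrite_range() builds (start/end are assumed variables):

struct writeback_control wbc = {
	.sync_mode	= WB_SYNC_ALL,	/* WB_SYNC_NONE for best-effort */
	.nr_to_write	= LONG_MAX,
	.range_start	= start,
	.range_end	= end,
};
int err = filemap_fdatawrite_wbc(mapping, &wbc);
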
405 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
454 * filemap_flush - mostly a non-blocking flush
457 * This is a mostly non-blocking flush. Not suitable for data-integrity
458 * purposes - I/O may not be started against all dirty pages.
469 * filemap_range_has_page - check if a page exists in range.
484 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
541 * filemap_fdatawait_range - wait for writeback to complete
542 * @mapping: address space structure to wait for
546 * Walk the list of under-writeback pages of the given address space
547 * in the given range and wait for all of them. Check error status of
565 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
566 * @mapping: address space structure to wait for
570 * Walk the list of under-writeback pages of the given address space in the
571 * given range and wait for all of them. Unlike filemap_fdatawait_range(),
575 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
587 * file_fdatawait_range - wait for writeback to complete
588 * @file: file pointing to address space structure to wait for
592 * Walk the list of under-writeback pages of the address space that file
593 * refers to, in the given range and wait for all of them. Check error
594 * status of the address space vs. the file->f_wb_err cursor and return it.
600 * Return: error status of the address space vs. the file->f_wb_err cursor.
604 struct address_space *mapping = file->f_mapping; in file_fdatawait_range()
612 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
613 * @mapping: address space structure to wait for
615 * Walk the list of under-writeback pages of the given address space
616 * and wait for all of them. Unlike filemap_fdatawait(), this function
620 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
635 return mapping->nrpages; in mapping_needs_writeback()
641 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
664 * filemap_write_and_wait_range - write out & wait on a file range
669 * Write out and wait upon file offsets lstart->lend, inclusive.
672 * that this function can be used to write to the very end-of-file (end = -1).
689 * written partially (e.g. -ENOSPC), so we wait for it. in filemap_write_and_wait_range()
690 * But the -EIO is special case, it may indicate the worst in filemap_write_and_wait_range()
693 if (err != -EIO) in filemap_write_and_wait_range()
705 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
712 * file_check_and_advance_wb_err - report wb error (if any) that was previously
724 * it and try to swap it into place. If it works, or another task beat us
729 * While we handle mapping->wb_err with atomic operations, the f_wb_err
738 errseq_t old = READ_ONCE(file->f_wb_err); in file_check_and_advance_wb_err()
739 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err()
742 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
744 spin_lock(&file->f_lock); in file_check_and_advance_wb_err()
745 old = file->f_wb_err; in file_check_and_advance_wb_err()
746 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
747 &file->f_wb_err); in file_check_and_advance_wb_err()
749 spin_unlock(&file->f_lock); in file_check_and_advance_wb_err()
757 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
758 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
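
The wb_err handling above is an instance of the generic errseq pattern; a minimal sketch of that pattern in isolation (handle_writeback_error() is a hypothetical consumer):

errseq_t since = errseq_sample(&mapping->wb_err);
/* ... writeback runs; failures call mapping_set_error(mapping, err) ... */
err = errseq_check_and_advance(&mapping->wb_err, &since);
if (err)
	handle_writeback_error(err);	/* error recorded after the sample */
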
764 * file_write_and_wait_range - write out & wait on a file range
769 * Write out and wait upon file offsets lstart->lend, inclusive.
772 * that this function can be used to write to the very end-of-file (end = -1).
782 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range()
791 if (err != -EIO) in file_write_and_wait_range()
802 * replace_page_cache_folio - replace a pagecache folio with a new one
816 struct address_space *mapping = old->mapping; in replace_page_cache_folio()
817 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_folio()
818 pgoff_t offset = old->index; in replace_page_cache_folio()
819 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_folio()
823 VM_BUG_ON_FOLIO(new->mapping, new); in replace_page_cache_folio()
826 new->mapping = mapping; in replace_page_cache_folio()
827 new->index = offset; in replace_page_cache_folio()
834 old->mapping = NULL; in replace_page_cache_folio()
854 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
866 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); in __filemap_add_folio()
873 folio->mapping = mapping; in __filemap_add_folio()
874 folio->index = xas.xa_index; in __filemap_add_folio()
877 int order = -1, split_order = 0; in __filemap_add_folio()
884 xas_set_err(&xas, -EEXIST); in __filemap_add_folio()
891 if (order == -1) in __filemap_add_folio()
895 /* entry may have changed before we re-acquire the lock */ in __filemap_add_folio()
920 mapping->nrpages += nr; in __filemap_add_folio()
933 /* split needed, alloc here and retry. */ in __filemap_add_folio()
954 folio->mapping = NULL; in __filemap_add_folio()
955 /* Leave page->index set: truncation relies upon it */ in __filemap_add_folio()
1016 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
1029 down_write(&mapping1->invalidate_lock); in filemap_invalidate_lock_two()
1031 down_write_nested(&mapping2->invalidate_lock, 1); in filemap_invalidate_lock_two()
1036 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
1047 up_write(&mapping1->invalidate_lock); in filemap_invalidate_unlock_two()
1049 up_write(&mapping2->invalidate_lock); in filemap_invalidate_unlock_two()
1054 * In order to wait for pages to become available there must be
1083 * The page wait code treats the "wait->flags" somewhat unusually, because
1093 * and remove it from the wait queue.
1101 * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1103 * This is the traditional exclusive wait.
1109 * cannot be taken, we stop walking the wait queue without waking
1116 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) in wake_page_function()
1121 = container_of(wait, struct wait_page_queue, wait); in wake_page_function()
1127 * If it's a lock handoff wait, we get the bit for it, and in wake_page_function()
1130 flags = wait->flags; in wake_page_function()
1132 if (test_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1133 return -1; in wake_page_function()
1135 if (test_and_set_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1136 return -1; in wake_page_function()
1142 * We are holding the wait-queue lock, but the waiter that in wake_page_function()
1147 * afterwards to avoid any races. This store-release pairs in wake_page_function()
1148 * with the load-acquire in folio_wait_bit_common(). in wake_page_function()
1150 smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN); in wake_page_function()
1151 wake_up_state(wait->private, mode); in wake_page_function()
1155 * and we can unconditionally remove the wait entry. in wake_page_function()
1159 * After this list_del_init(&wait->entry) the wait entry in wake_page_function()
1160 * might be de-allocated and the process might even have in wake_page_function()
1163 list_del_init_careful(&wait->entry); in wake_page_function()
1177 spin_lock_irqsave(&q->lock, flags); in folio_wake_bit()
1192 spin_unlock_irqrestore(&q->lock, flags); in folio_wake_bit()
1205 DROP, /* Drop ref to page before wait, no check when woken,
1211 * Attempt to check (or get) the folio flag, and mark us done
1215 struct wait_queue_entry *wait) in folio_trylock_flag()
1217 if (wait->flags & WQ_FLAG_EXCLUSIVE) { in folio_trylock_flag()
1218 if (test_and_set_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1220 } else if (test_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1223 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE; in folio_trylock_flag()
1236 wait_queue_entry_t *wait = &wait_page.wait; in folio_wait_bit_common()
1248 init_wait(wait); in folio_wait_bit_common()
1249 wait->func = wake_page_function; in folio_wait_bit_common()
1254 wait->flags = 0; in folio_wait_bit_common()
1256 wait->flags = WQ_FLAG_EXCLUSIVE; in folio_wait_bit_common()
1257 if (--unfairness < 0) in folio_wait_bit_common()
1258 wait->flags |= WQ_FLAG_CUSTOM; in folio_wait_bit_common()
1267 * need to wake us up (otherwise they'll never in folio_wait_bit_common()
1269 * page queue), and add ourselves to the wait in folio_wait_bit_common()
1275 spin_lock_irq(&q->lock); in folio_wait_bit_common()
1277 if (!folio_trylock_flag(folio, bit_nr, wait)) in folio_wait_bit_common()
1278 __add_wait_queue_entry_tail(q, wait); in folio_wait_bit_common()
1279 spin_unlock_irq(&q->lock); in folio_wait_bit_common()
1295 * be very careful with the 'wait->flags', because in folio_wait_bit_common()
1304 flags = smp_load_acquire(&wait->flags); in folio_wait_bit_common()
1313 /* If we were non-exclusive, we're done */ in folio_wait_bit_common()
1317 /* If the waker got the lock for us, we're done */ in folio_wait_bit_common()
1325 * And if that fails, we'll have to retry this all. in folio_wait_bit_common()
1330 wait->flags |= WQ_FLAG_DONE; in folio_wait_bit_common()
1336 * waiter from the wait-queues, but the folio waiters bit will remain in folio_wait_bit_common()
1340 finish_wait(q, wait); in folio_wait_bit_common()
1348 * NOTE! The wait->flags weren't stable until we've done the in folio_wait_bit_common()
1357 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive in folio_wait_bit_common()
1361 return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR; in folio_wait_bit_common()
1363 return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR; in folio_wait_bit_common()
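
Callers rarely touch this machinery directly; a hedged sketch of the usual caller-facing surface that funnels into folio_wait_bit_common():

folio_lock(folio);		/* sleeps in folio_wait_bit_common(PG_locked)
				 * when contended */
/* ... operate on the folio ... */
folio_unlock(folio);		/* clears PG_locked, wakes waiters through
				 * folio_wake_bit() */

folio_wait_writeback(folio);	/* same machinery, waiting on PG_writeback */
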
1368 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1372 * Wait for a migration entry referencing the given page to be removed. This is
1387 wait_queue_entry_t *wait = &wait_page.wait; in migration_entry_wait_on_locked()
1401 init_wait(wait); in migration_entry_wait_on_locked()
1402 wait->func = wake_page_function; in migration_entry_wait_on_locked()
1405 wait->flags = 0; in migration_entry_wait_on_locked()
1407 spin_lock_irq(&q->lock); in migration_entry_wait_on_locked()
1409 if (!folio_trylock_flag(folio, PG_locked, wait)) in migration_entry_wait_on_locked()
1410 __add_wait_queue_entry_tail(q, wait); in migration_entry_wait_on_locked()
1411 spin_unlock_irq(&q->lock); in migration_entry_wait_on_locked()
1426 flags = smp_load_acquire(&wait->flags); in migration_entry_wait_on_locked()
1437 finish_wait(q, wait); in migration_entry_wait_on_locked()
1459 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1460 * @folio: The folio to wait for.
1469 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1477 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1478 * @folio: Folio defining the wait queue of interest
1481 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1488 spin_lock_irqsave(&q->lock, flags); in folio_add_wait_queue()
1491 spin_unlock_irqrestore(&q->lock, flags); in folio_add_wait_queue()
1496 * folio_unlock - Unlock a locked folio.
1516 * folio_end_read - End read on a folio.
1546 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1566 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1567 * @folio: The folio to wait on.
1569 * Wait for PG_private_2 to be cleared on a folio.
1579 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1580 * @folio: The folio to wait on.
1582 * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is
1586 * - 0 if successful.
1587 * - -EINTR if a fatal signal was encountered.
1604 * folio_end_writeback - End writeback against a folio.
1629 * on truncation to wait for the clearing of PG_writeback. in folio_end_writeback()
1642 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1659 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) in __folio_lock_async()
1664 wait->folio = folio; in __folio_lock_async()
1665 wait->bit_nr = PG_locked; in __folio_lock_async()
1667 spin_lock_irq(&q->lock); in __folio_lock_async()
1668 __add_wait_queue_entry_tail(q, &wait->wait); in __folio_lock_async()
1678 __remove_wait_queue(q, &wait->wait); in __folio_lock_async()
1680 ret = -EIOCBQUEUED; in __folio_lock_async()
1681 spin_unlock_irq(&q->lock); in __folio_lock_async()
1687 * 0 - folio is locked.
1688 * non-zero - folio is not locked.
1689 * mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
1694 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1698 unsigned int flags = vmf->flags; in __folio_lock_or_retry()
1702 * CAUTION! In this case, mmap_lock/per-VMA lock is not in __folio_lock_or_retry()
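
A sketch of the calling convention described above, via the folio_lock_or_retry() wrapper that fault paths normally use (folio and vmf are assumed to be in scope):

vm_fault_t ret = folio_lock_or_retry(folio, vmf);

if (ret) {	/* VM_FAULT_RETRY: mmap_lock/per-VMA lock already dropped */
	folio_put(folio);
	return ret;
}
/* folio is locked and the fault-path locks are still held */
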
1731 * page_cache_next_miss() - Find the next gap in the page cache.
1736 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1746 * range specified (in which case 'return - index >= max_scan' will be true).
1747 * In the rare case of index wrap-around, 0 will be returned.
1752 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1754 while (max_scan--) { in page_cache_next_miss()
1767 * page_cache_prev_miss() - Find the previous gap in the page cache.
1772 * Search the range [max(index - max_scan + 1, 0), index] for the
1782 * range specified (in which case 'index - return >= max_scan' will be true).
1783 * In the rare case of wrap-around, ULONG_MAX will be returned.
1788 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1790 while (max_scan--) { in page_cache_prev_miss()
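
Illustrative use of this pair, measuring how much contiguous pagecache surrounds an index (mapping and index are assumed variables):

pgoff_t next_gap = page_cache_next_miss(mapping, index, 64);
pgoff_t prev_gap = page_cache_prev_miss(mapping, index, 64);
unsigned long cached_ahead  = next_gap - index;	/* slots to the next hole */
unsigned long cached_behind = index - prev_gap;	/* slots since the previous hole */
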
1807 * 3. If the folio is not found by xas_reload(), put the refcount and retry
1815 * increased by a speculative page cache (or GUP-fast) lookup as it can
1823 * filemap_get_entry - Get a page cache entry.
1836 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_entry()
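
A condensed sketch of the three-step protocol described above, essentially what filemap_get_entry() does with the xas just declared (error and shadow-entry handling trimmed):

struct folio *folio;

rcu_read_lock();
repeat:
	xas_reset(&xas);
	folio = xas_load(&xas);
	if (xas_retry(&xas, folio))
		goto repeat;
	if (folio && !xa_is_value(folio)) {
		if (!folio_try_get(folio))	/* steps 1 and 2 */
			goto repeat;
		if (unlikely(folio != xas_reload(&xas))) {
			folio_put(folio);	/* step 3: raced, retry */
			goto repeat;
		}
	}
	rcu_read_unlock();
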
1866 * __filemap_get_folio - Find and get a reference to a folio.
1897 return ERR_PTR(-EAGAIN); in __filemap_get_folio()
1904 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1943 if (index & ((1UL << order) - 1)) in __filemap_get_folio()
1949 err = -ENOMEM; in __filemap_get_folio()
1965 } while (order-- > min_order); in __filemap_get_folio()
1967 if (err == -EEXIST) in __filemap_get_folio()
1980 return ERR_PTR(-ENOENT); in __filemap_get_folio()
1990 retry: in find_get_entry()
1997 goto retry; in find_get_entry()
2017 goto retry; in find_get_entry()
2021 * find_get_entries - gang pagecache lookup
2033 * due to not-present entries or large folios.
2043 XA_STATE(xas, &mapping->i_pages, *start); in find_get_entries()
2048 indices[fbatch->nr] = xas.xa_index; in find_get_entries()
2055 int idx = folio_batch_count(fbatch) - 1; in find_get_entries()
2057 folio = fbatch->folios[idx]; in find_get_entries()
2061 nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]); in find_get_entries()
2070 * find_lock_entries - Find a batch of pagecache entries.
2084 * due to not-present entries, large folios, folios which could not be
2092 XA_STATE(xas, &mapping->i_pages, *start); in find_lock_entries()
2102 base = folio->index; in find_lock_entries()
2107 if (base + nr - 1 > end) in find_lock_entries()
2111 if (folio->mapping != mapping || in find_lock_entries()
2118 base = xas.xa_index & ~(nr - 1); in find_lock_entries()
2123 if (base + nr - 1 > end) in find_lock_entries()
2129 indices[fbatch->nr] = xas.xa_index; in find_lock_entries()
2144 * filemap_get_folios - Get a batch of folios
2165 * filemap_get_folios_contig - Get a batch of contiguous folios
2182 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_contig()
2204 goto retry; in filemap_get_folios_contig()
2211 *start = folio->index + nr; in filemap_get_folios_contig()
2218 retry: in filemap_get_folios_contig()
2226 folio = fbatch->folios[nr - 1]; in filemap_get_folios_contig()
2236 * filemap_get_folios_tag - Get a batch of folios matching @tag
2257 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_tag()
2271 *start = folio->index + nr; in filemap_get_folios_tag()
2278 * breaks the iteration when there is a page at index -1 but that is in filemap_get_folios_tag()
2281 if (end == (pgoff_t)-1) in filemap_get_folios_tag()
2282 *start = (pgoff_t)-1; in filemap_get_folios_tag()
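
A hedged sketch of the batched loop a writeback implementation typically builds on this function (simplified; the actual write-out is elided):

struct folio_batch fbatch;
pgoff_t index = 0;
unsigned int i;

folio_batch_init(&fbatch);
while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
			      PAGECACHE_TAG_DIRTY, &fbatch)) {
	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i];
		/* ... lock and write the folio back ... */
	}
	folio_batch_release(&fbatch);
	cond_resched();
}
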
2296 * ---R__________________________________________B__________
2309 ra->ra_pages /= 4; in shrink_readahead_size_eio()
2313 * filemap_get_read_batch - Get a batch of folios for read
2324 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2336 goto retry; in filemap_get_read_batch()
2347 xas_advance(&xas, folio_next_index(folio) - 1); in filemap_get_read_batch()
2351 retry: in filemap_get_read_batch()
2379 shrink_readahead_size_eio(&file->f_ra); in filemap_read_folio()
2380 return -EIO; in filemap_read_folio()
2392 if (!mapping->a_ops->is_partially_uptodate) in filemap_range_uptodate()
2394 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2398 count -= folio_pos(folio) - pos; in filemap_range_uptodate()
2401 pos -= folio_pos(folio); in filemap_range_uptodate()
2404 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2413 if (iocb->ki_flags & IOCB_NOWAIT) { in filemap_update_page()
2415 return -EAGAIN; in filemap_update_page()
2421 error = -EAGAIN; in filemap_update_page()
2422 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) in filemap_update_page()
2424 if (!(iocb->ki_flags & IOCB_WAITQ)) { in filemap_update_page()
2433 error = __folio_lock_async(folio, iocb->ki_waitq); in filemap_update_page()
2439 if (!folio->mapping) in filemap_update_page()
2443 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, in filemap_update_page()
2447 error = -EAGAIN; in filemap_update_page()
2448 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) in filemap_update_page()
2451 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, in filemap_update_page()
2474 return -ENOMEM; in filemap_create_folio()
2485 * pages or ->readahead() that need to hold invalidate_lock in filemap_create_folio()
2493 if (error == -EEXIST) in filemap_create_folio()
2498 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_create_folio()
2515 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2517 if (iocb->ki_flags & IOCB_NOIO) in filemap_readahead()
2518 return -EAGAIN; in filemap_readahead()
2519 page_cache_async_ra(&ractl, folio, last_index - folio->index); in filemap_readahead()
2526 struct file *filp = iocb->ki_filp; in filemap_get_pages()
2527 struct address_space *mapping = filp->f_mapping; in filemap_get_pages()
2528 struct file_ra_state *ra = &filp->f_ra; in filemap_get_pages()
2529 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; in filemap_get_pages()
2536 last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE); in filemap_get_pages()
2537 retry: in filemap_get_pages()
2539 return -EINTR; in filemap_get_pages()
2541 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2543 if (iocb->ki_flags & IOCB_NOIO) in filemap_get_pages()
2544 return -EAGAIN; in filemap_get_pages()
2545 if (iocb->ki_flags & IOCB_NOWAIT) in filemap_get_pages()
2548 last_index - index); in filemap_get_pages()
2549 if (iocb->ki_flags & IOCB_NOWAIT) in filemap_get_pages()
2551 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2554 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) in filemap_get_pages()
2555 return -EAGAIN; in filemap_get_pages()
2556 err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch); in filemap_get_pages()
2558 goto retry; in filemap_get_pages()
2562 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2569 if ((iocb->ki_flags & IOCB_WAITQ) && in filemap_get_pages()
2571 iocb->ki_flags |= IOCB_NOWAIT; in filemap_get_pages()
2578 trace_mm_filemap_get_pages(mapping, index, last_index - 1); in filemap_get_pages()
2583 if (likely(--fbatch->nr)) in filemap_get_pages()
2586 goto retry; in filemap_get_pages()
2598 * filemap_read - Read data from the page cache.
2613 struct file *filp = iocb->ki_filp; in filemap_read()
2614 struct file_ra_state *ra = &filp->f_ra; in filemap_read()
2615 struct address_space *mapping = filp->f_mapping; in filemap_read()
2616 struct inode *inode = mapping->host; in filemap_read()
2621 loff_t last_pos = ra->prev_pos; in filemap_read()
2623 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) in filemap_read()
2628 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); in filemap_read()
2636 * can no longer safely return -EIOCBQUEUED. Hence mark in filemap_read()
2639 if ((iocb->ki_flags & IOCB_WAITQ) && already_read) in filemap_read()
2640 iocb->ki_flags |= IOCB_NOWAIT; in filemap_read()
2642 if (unlikely(iocb->ki_pos >= i_size_read(inode))) in filemap_read()
2645 error = filemap_get_pages(iocb, iter->count, &fbatch, false); in filemap_read()
2652 * Checking i_size after the check allows us to calculate in filemap_read()
2653 * the correct value for "nr", which means the zero-filled in filemap_read()
2655 * another truncate extends the file - this is desired though). in filemap_read()
2658 if (unlikely(iocb->ki_pos >= isize)) in filemap_read()
2660 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); in filemap_read()
2672 if (!pos_same_folio(iocb->ki_pos, last_pos - 1, in filemap_read()
2679 size_t offset = iocb->ki_pos & (fsize - 1); in filemap_read()
2680 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, in filemap_read()
2681 fsize - offset); in filemap_read()
2699 iocb->ki_pos += copied; in filemap_read()
2700 last_pos = iocb->ki_pos; in filemap_read()
2703 error = -EFAULT; in filemap_read()
2711 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); in filemap_read()
2714 ra->prev_pos = last_pos; in filemap_read()
2721 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_write_and_wait()
2722 loff_t pos = iocb->ki_pos; in kiocb_write_and_wait()
2723 loff_t end = pos + count - 1; in kiocb_write_and_wait()
2725 if (iocb->ki_flags & IOCB_NOWAIT) { in kiocb_write_and_wait()
2727 return -EAGAIN; in kiocb_write_and_wait()
2743 return -EAGAIN; in filemap_invalidate_pages()
2754 * without clobbering -EIOCBQUEUED from ->direct_IO(). in filemap_invalidate_pages()
2762 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_pages()
2764 return filemap_invalidate_pages(mapping, iocb->ki_pos, in kiocb_invalidate_pages()
2765 iocb->ki_pos + count - 1, in kiocb_invalidate_pages()
2766 iocb->ki_flags & IOCB_NOWAIT); in kiocb_invalidate_pages()
2771 * generic_file_read_iter - generic filesystem read routine
2778 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2782 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2784 * can be read, -EAGAIN shall be returned. When readahead would be
2800 if (iocb->ki_flags & IOCB_DIRECT) { in generic_file_read_iter()
2801 struct file *file = iocb->ki_filp; in generic_file_read_iter()
2802 struct address_space *mapping = file->f_mapping; in generic_file_read_iter()
2803 struct inode *inode = mapping->host; in generic_file_read_iter()
2810 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
2812 iocb->ki_pos += retval; in generic_file_read_iter()
2813 count -= retval; in generic_file_read_iter()
2815 if (retval != -EIOCBQUEUED) in generic_file_read_iter()
2816 iov_iter_revert(iter, count - iov_iter_count(iter)); in generic_file_read_iter()
2829 if (iocb->ki_pos >= i_size_read(inode)) in generic_file_read_iter()
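
A sketch of how a simple filesystem wires these generic helpers together; the table mirrors common in-tree fops, but the struct name is illustrative:

const struct file_operations example_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= filemap_splice_read,
};
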
2847 size = min(size, folio_size(folio) - offset); in splice_folio_into_pipe()
2851 !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { in splice_folio_into_pipe()
2853 size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced); in splice_folio_into_pipe()
2862 pipe->head++; in splice_folio_into_pipe()
2872 * filemap_splice_read - Splice data from a file's pagecache into a pipe
2885 * to be read; -EAGAIN will be returned if the pipe had no space, and some
2901 if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes)) in filemap_splice_read()
2908 used = pipe_occupancy(pipe->head, pipe->tail); in filemap_splice_read()
2909 npages = max_t(ssize_t, pipe->max_usage - used, 0); in filemap_splice_read()
2917 if (*ppos >= i_size_read(in->f_mapping->host)) in filemap_splice_read()
2928 * Checking i_size after the check allows us to calculate in filemap_splice_read()
2929 * the correct value for "nr", which means the zero-filled in filemap_splice_read()
2931 * another truncate extends the file - this is desired though). in filemap_splice_read()
2933 isize = i_size_read(in->f_mapping->host); in filemap_splice_read()
2942 writably_mapped = mapping_writably_mapped(in->f_mapping); in filemap_splice_read()
2960 n = min_t(loff_t, len, isize - *ppos); in filemap_splice_read()
2964 len -= n; in filemap_splice_read()
2967 in->f_ra.prev_pos = *ppos; in filemap_splice_read()
2968 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) in filemap_splice_read()
2987 const struct address_space_operations *ops = mapping->a_ops; in folio_seek_hole_data()
2988 size_t offset, bsz = i_blocksize(mapping->host); in folio_seek_hole_data()
2992 if (!ops->is_partially_uptodate) in folio_seek_hole_data()
2998 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
3001 offset = offset_in_folio(folio, start) & ~(bsz - 1); in folio_seek_hole_data()
3004 if (ops->is_partially_uptodate(folio, offset, bsz) == in folio_seek_hole_data()
3007 start = (start + bsz) & ~(bsz - 1); in folio_seek_hole_data()
3024 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3033 * entirely memory-based such as tmpfs, and filesystems which support
3036 * Return: The requested offset on success, or -ENXIO if @whence specifies
3038 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3044 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
3045 pgoff_t max = (end - 1) >> PAGE_SHIFT; in mapping_seek_hole_data()
3050 return -ENXIO; in mapping_seek_hole_data()
3077 start = -ENXIO; in mapping_seek_hole_data()
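
Illustrative ->llseek built on this helper, roughly what a fully pagecache-backed filesystem does (function name and error handling assumed):

static loff_t example_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	if (whence == SEEK_DATA || whence == SEEK_HOLE)
		return mapping_seek_hole_data(file->f_mapping, offset,
					      i_size_read(inode), whence);
	return generic_file_llseek(file, offset, whence);
}
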
3090 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
3091 * @vmf: the vm_fault for this fault.
3092 * @folio: the folio to lock.
3093 * @fpin: the pointer to the file we may pin (or is already pinned).
3108 * NOTE! This will make us return with VM_FAULT_RETRY, but with in lock_folio_maybe_drop_mmap()
3112 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_folio_maybe_drop_mmap()
3116 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_folio_maybe_drop_mmap()
3138 * to drop the mmap sem we return the file that was pinned in order for us to do
3144 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
3145 struct file_ra_state *ra = &file->f_ra; in do_sync_mmap_readahead()
3146 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead()
3147 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3149 unsigned long vm_flags = vmf->vma->vm_flags; in do_sync_mmap_readahead()
3156 ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); in do_sync_mmap_readahead()
3157 ra->size = HPAGE_PMD_NR; in do_sync_mmap_readahead()
3163 ra->size *= 2; in do_sync_mmap_readahead()
3164 ra->async_size = HPAGE_PMD_NR; in do_sync_mmap_readahead()
3170 /* If we don't want any read-ahead, don't bother */ in do_sync_mmap_readahead()
3173 if (!ra->ra_pages) in do_sync_mmap_readahead()
3178 page_cache_sync_ra(&ractl, ra->ra_pages); in do_sync_mmap_readahead()
3183 mmap_miss = READ_ONCE(ra->mmap_miss); in do_sync_mmap_readahead()
3185 WRITE_ONCE(ra->mmap_miss, ++mmap_miss); in do_sync_mmap_readahead()
3189 * stop bothering with read-ahead. It will only hurt. in do_sync_mmap_readahead()
3195 * mmap read-around in do_sync_mmap_readahead()
3198 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); in do_sync_mmap_readahead()
3199 ra->size = ra->ra_pages; in do_sync_mmap_readahead()
3200 ra->async_size = ra->ra_pages / 4; in do_sync_mmap_readahead()
3201 ractl._index = ra->start; in do_sync_mmap_readahead()
3214 struct file *file = vmf->vma->vm_file; in do_async_mmap_readahead()
3215 struct file_ra_state *ra = &file->f_ra; in do_async_mmap_readahead()
3216 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); in do_async_mmap_readahead()
3220 /* If we don't want any read-ahead, don't bother */ in do_async_mmap_readahead()
3221 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) in do_async_mmap_readahead()
3224 mmap_miss = READ_ONCE(ra->mmap_miss); in do_async_mmap_readahead()
3226 WRITE_ONCE(ra->mmap_miss, --mmap_miss); in do_async_mmap_readahead()
3230 page_cache_async_ra(&ractl, folio, ra->ra_pages); in do_async_mmap_readahead()
3237 struct vm_area_struct *vma = vmf->vma; in filemap_fault_recheck_pte_none()
3251 * scenario while holding the PT lock, to not degrade non-mlocked in filemap_fault_recheck_pte_none()
3255 if (!(vma->vm_flags & VM_LOCKED)) in filemap_fault_recheck_pte_none()
3258 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) in filemap_fault_recheck_pte_none()
3261 ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address, in filemap_fault_recheck_pte_none()
3262 &vmf->ptl); in filemap_fault_recheck_pte_none()
3269 spin_lock(vmf->ptl); in filemap_fault_recheck_pte_none()
3272 spin_unlock(vmf->ptl); in filemap_fault_recheck_pte_none()
3279 * filemap_fault - read in file data for page fault handling
3289 * vma->vm_mm->mmap_lock must be held on entry.
3299 * Return: bitwise-OR of %VM_FAULT_ codes.
3304 struct file *file = vmf->vma->vm_file; in filemap_fault()
3306 struct address_space *mapping = file->f_mapping; in filemap_fault()
3307 struct inode *inode = mapping->host; in filemap_fault()
3308 pgoff_t max_idx, index = vmf->pgoff; in filemap_fault()
3328 if (!(vmf->flags & FAULT_FLAG_TRIED)) in filemap_fault()
3341 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in filemap_fault()
3355 vmf->gfp_mask); in filemap_fault()
3368 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3377 * that it's up-to-date. If not, it is going to be due to an error, in filemap_fault()
3403 * time to return to the upper layer and have it re-find the vma and in filemap_fault()
3424 vmf->page = folio_file_page(folio, index); in filemap_fault()
3429 * Umm, take care of errors if the page isn't up-to-date. in filemap_fault()
3430 * Try to re-read it _once_. We do this synchronously, in filemap_fault()
3435 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3449 * re-find the vma and come back and find our hopefully still populated in filemap_fault()
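
For context, filemap_fault() is normally reached through a vm_operations_struct; the three entries below match generic_file_vm_ops in this file, only the struct name is assumed:

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};
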
3465 struct mm_struct *mm = vmf->vma->vm_mm; in filemap_map_pmd()
3468 if (pmd_trans_huge(*vmf->pmd)) { in filemap_map_pmd()
3474 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { in filemap_map_pmd()
3484 if (pmd_none(*vmf->pmd) && vmf->prealloc_pte) in filemap_map_pmd()
3485 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); in filemap_map_pmd()
3514 if (folio->mapping != mapping) in next_uptodate_folio()
3518 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in next_uptodate_folio()
3519 if (xas->xa_index >= max_idx) in next_uptodate_folio()
3543 pte_t *old_ptep = vmf->pte; in filemap_map_folio_range()
3552 * In such situation, read-ahead is only a waste of IO. in filemap_map_folio_range()
3554 * we can stop read-ahead. in filemap_map_folio_range()
3562 * fault-around logic. in filemap_map_folio_range()
3564 if (!pte_none(ptep_get(&vmf->pte[count]))) in filemap_map_folio_range()
3574 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3580 vmf->pte += count; in filemap_map_folio_range()
3583 } while (--nr_pages > 0); in filemap_map_folio_range()
3589 if (in_range(vmf->address, addr, count * PAGE_SIZE)) in filemap_map_folio_range()
3593 vmf->pte = old_ptep; in filemap_map_folio_range()
3603 struct page *page = &folio->page; in filemap_map_order0_folio()
3615 * the fault-around logic. in filemap_map_order0_folio()
3617 if (!pte_none(ptep_get(vmf->pte))) in filemap_map_order0_folio()
3620 if (vmf->address == addr) in filemap_map_order0_folio()
3633 struct vm_area_struct *vma = vmf->vma; in filemap_map_pages()
3634 struct file *file = vma->vm_file; in filemap_map_pages()
3635 struct address_space *mapping = file->f_mapping; in filemap_map_pages()
3638 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3654 addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); in filemap_map_pages()
3655 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); in filemap_map_pages()
3656 if (!vmf->pte) { in filemap_map_pages()
3662 file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1; in filemap_map_pages()
3670 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
3671 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3673 end = folio_next_index(folio) - 1; in filemap_map_pages()
3674 nr_pages = min(end, end_pgoff) - xas.xa_index + 1; in filemap_map_pages()
3681 xas.xa_index - folio->index, addr, in filemap_map_pages()
3687 add_mm_counter(vma->vm_mm, folio_type, rss); in filemap_map_pages()
3688 pte_unmap_unlock(vmf->pte, vmf->ptl); in filemap_map_pages()
3693 mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss); in filemap_map_pages()
3695 WRITE_ONCE(file->f_ra.mmap_miss, 0); in filemap_map_pages()
3697 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss); in filemap_map_pages()
3705 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite()
3706 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite()
3709 sb_start_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3710 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
3712 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3725 sb_end_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3739 struct address_space *mapping = file->f_mapping; in generic_file_mmap()
3741 if (!mapping->a_ops->read_folio) in generic_file_mmap()
3742 return -ENOEXEC; in generic_file_mmap()
3744 vma->vm_ops = &generic_file_vm_ops; in generic_file_mmap()
3749 * This is for filesystems which do not implement ->writepage.
3754 return -EINVAL; in generic_file_readonly_mmap()
3764 return -ENOSYS; in generic_file_mmap()
3768 return -ENOSYS; in generic_file_readonly_mmap()
3783 filler = mapping->a_ops->read_folio; in do_read_cache_folio()
3790 return ERR_PTR(-ENOMEM); in do_read_cache_folio()
3795 if (err == -EEXIST) in do_read_cache_folio()
3812 if (!folio->mapping) { in do_read_cache_folio()
3839 * read_cache_folio - Read into page cache, fill it if needed.
3842 * @filler: Function to perform the read, or NULL to use aops->read_folio().
3851 * Context: May sleep. Expects mapping->invalidate_lock to be held.
3863 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
3872 * possible and so is EINTR. If ->read_folio returns another error,
3875 * The function expects mapping->invalidate_lock to be already held.
3893 return &folio->page; in do_read_cache_page()
3906 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3914 * If the page does not get brought uptodate, return -EIO.
3916 * The function expects mapping->invalidate_lock to be already held.
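
Hedged usage sketch for these read_cache_* helpers; the kernel-doc requires invalidate_lock to be held, and shared mode is assumed to suffice here:

filemap_invalidate_lock_shared(mapping);
folio = read_cache_folio(mapping, index, NULL, file);
filemap_invalidate_unlock_shared(mapping);
if (IS_ERR(folio))
	return PTR_ERR(folio);
/* folio is uptodate and referenced, but not locked */
folio_put(folio);
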
3937 errseq_set(&filp->f_mapping->wb_err, -EIO); in dio_warn_stale_pagecache()
3943 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, in dio_warn_stale_pagecache()
3944 current->comm); in dio_warn_stale_pagecache()
3950 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_post_direct_write()
3952 if (mapping->nrpages && in kiocb_invalidate_post_direct_write()
3954 iocb->ki_pos >> PAGE_SHIFT, in kiocb_invalidate_post_direct_write()
3955 (iocb->ki_pos + count - 1) >> PAGE_SHIFT)) in kiocb_invalidate_post_direct_write()
3956 dio_warn_stale_pagecache(iocb->ki_filp); in kiocb_invalidate_post_direct_write()
3962 struct address_space *mapping = iocb->ki_filp->f_mapping; in generic_file_direct_write()
3972 if (written == -EBUSY) in generic_file_direct_write()
3977 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
3981 * cached by non-direct readahead, or faulted in by get_user_pages() in generic_file_direct_write()
3988 * the invalidation for us. However there are some file systems that in generic_file_direct_write()
3997 struct inode *inode = mapping->host; in generic_file_direct_write()
3998 loff_t pos = iocb->ki_pos; in generic_file_direct_write()
4002 write_len -= written; in generic_file_direct_write()
4003 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { in generic_file_direct_write()
4007 iocb->ki_pos = pos; in generic_file_direct_write()
4009 if (written != -EIOCBQUEUED) in generic_file_direct_write()
4010 iov_iter_revert(from, write_len - iov_iter_count(from)); in generic_file_direct_write()
4017 struct file *file = iocb->ki_filp; in generic_perform_write()
4018 loff_t pos = iocb->ki_pos; in generic_perform_write()
4019 struct address_space *mapping = file->f_mapping; in generic_perform_write()
4020 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
4033 retry: in generic_perform_write()
4034 offset = pos & (chunk - 1); in generic_perform_write()
4035 bytes = min(chunk - offset, bytes); in generic_perform_write()
4042 * up-to-date. in generic_perform_write()
4045 status = -EFAULT; in generic_perform_write()
4050 status = -EINTR; in generic_perform_write()
4054 status = a_ops->write_begin(file, mapping, pos, bytes, in generic_perform_write()
4060 if (bytes > folio_size(folio) - offset) in generic_perform_write()
4061 bytes = folio_size(folio) - offset; in generic_perform_write()
4069 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
4072 iov_iter_revert(i, copied - max(status, 0L)); in generic_perform_write()
4080 * A short copy made ->write_end() reject the in generic_perform_write()
4089 goto retry; in generic_perform_write()
4099 iocb->ki_pos += written; in generic_perform_write()
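
The loop above drives the ->write_begin/->write_end contract. A hedged sketch of a trivial pair, loosely modeled on the simple_* helpers; it assumes whole folios are copied, so real implementations must still handle short copies and sub-folio writes:

static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len,
			       struct folio **foliop, void **fsdata)
{
	struct folio *folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
			FGP_WRITEBEGIN, mapping_gfp_mask(mapping));

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;		/* returned locked and referenced */
	return 0;
}

static int example_write_end(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct folio *folio, void *fsdata)
{
	if (pos + copied > i_size_read(mapping->host))
		i_size_write(mapping->host, pos + copied);
	folio_mark_uptodate(folio);	/* valid only for full-folio copies */
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}
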
4105 * __generic_file_write_iter - write data to a file
4127 struct file *file = iocb->ki_filp; in __generic_file_write_iter()
4128 struct address_space *mapping = file->f_mapping; in __generic_file_write_iter()
4129 struct inode *inode = mapping->host; in __generic_file_write_iter()
4140 if (iocb->ki_flags & IOCB_DIRECT) { in __generic_file_write_iter()
4147 * page-cache pages correctly). in __generic_file_write_iter()
4160 * generic_file_write_iter - write data to a file
4174 struct file *file = iocb->ki_filp; in generic_file_write_iter()
4175 struct inode *inode = file->f_mapping->host; in generic_file_write_iter()
4191 * filemap_release_folio() - Release fs-specific metadata on a folio.
4196 * (presumably at folio->private).
4209 struct address_space * const mapping = folio->mapping; in filemap_release_folio()
4217 if (mapping && mapping->a_ops->release_folio) in filemap_release_folio()
4218 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
4224 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
4239 struct address_space *mapping = inode->i_mapping; in filemap_invalidate_inode()
4242 pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1; in filemap_invalidate_inode()
4244 if (!mapping || !mapping->nrpages || end < start) in filemap_invalidate_inode()
4250 if (!mapping->nrpages) in filemap_invalidate_inode()
4267 /* Wait for writeback to complete on all folios and discard. */ in filemap_invalidate_inode()
4279 * filemap_cachestat() - compute the page cache statistics of a mapping
4293 XA_STATE(xas, &mapping->i_pages, first_index); in filemap_cachestat()
4307 * get freed (and reused) underneath us. in filemap_cachestat()
4313 * the rcu-protected xarray. in filemap_cachestat()
4322 folio_last_index = folio_first_index + nr_pages - 1; in filemap_cachestat()
4326 nr_pages -= first_index - folio_first_index; in filemap_cachestat()
4329 nr_pages -= folio_last_index - last_index; in filemap_cachestat()
4336 cs->nr_evicted += nr_pages; in filemap_cachestat()
4340 /* shmem file - in swap cache */ in filemap_cachestat()
4351 * ensures swapoff waits for us before in filemap_cachestat()
4363 cs->nr_recently_evicted += nr_pages; in filemap_cachestat()
4369 cs->nr_cache += nr_pages; in filemap_cachestat()
4372 cs->nr_dirty += nr_pages; in filemap_cachestat()
4375 cs->nr_writeback += nr_pages; in filemap_cachestat()
4400 * `off` and `len` must be non-negative integers. If `len` > 0,
4414 * zero - success
4415 * -EFAULT - cstat or cstat_range points to an illegal address
4416 * -EINVAL - invalid flags
4417 * -EBADF - invalid file descriptor
4418 * -EOPNOTSUPP - file descriptor is of a hugetlbfs file
4431 return -EBADF; in SYSCALL_DEFINE4()
4436 return -EFAULT; in SYSCALL_DEFINE4()
4442 return -EOPNOTSUPP; in SYSCALL_DEFINE4()
4447 return -EINVAL; in SYSCALL_DEFINE4()
4452 csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT; in SYSCALL_DEFINE4()
4454 mapping = fd_file(f)->f_mapping; in SYSCALL_DEFINE4()
4459 return -EFAULT; in SYSCALL_DEFINE4()
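
A userspace sketch of invoking the syscall defined above (Linux 6.5+); it assumes <linux/mman.h> provides struct cachestat_range/cachestat and that the libc headers define SYS_cachestat:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>

int main(int argc, char **argv)
{
	struct cachestat_range range = { 0, 0 };	/* len == 0: whole file */
	struct cachestat cs;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (syscall(SYS_cachestat, fd, &range, &cs, 0))
		return 1;
	printf("cached %llu dirty %llu writeback %llu evicted %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted);
	return 0;
}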