/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                       pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode))
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
int filemap_invalidate_pages(struct address_space *mapping,
                             loff_t pos, loff_t end, bool nowait);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_flush_nr(struct address_space *mapping, long *nr_to_write);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
                                        loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
                             loff_t start, loff_t end);

static inline int filemap_fdatawait(struct address_space *mapping)
{
        return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend);
int filemap_fdatawrite_range(struct address_space *mapping,
                             loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
        return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
        /* Fastpath for common case of no error */
        if (unlikely(err))
                __filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
                                       errseq_t since)
{
        return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past.  This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
        return errseq_sample(&mapping->wb_err);
}
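
/*
 * Example (illustrative sketch, not a definition from this header): a
 * typical fsync-style consumer pairs filemap_sample_wb_err() with
 * filemap_check_wb_err() around a writeback attempt:
 *
 *	errseq_t since = filemap_sample_wb_err(mapping);
 *
 *	filemap_write_and_wait(mapping);
 *	if (filemap_check_wb_err(mapping, since))
 *		... an error was recorded after the sample point ...
 *
 * In-tree callers normally keep the sample point in struct file and use
 * file_check_and_advance_wb_err() instead; this only shows how the
 * sample/check pair relates.
 */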

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
        return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

/*
 * Flush file data before changing attributes.  Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
        inode_dio_wait(inode);
        return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(const struct address_space *mapping)
{
        return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state.  That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages.  This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(const struct address_space *mapping)
{
        void *head;

        /*
         * On highmem systems, there could be lowmem pressure from the
         * inodes before there is highmem pressure from the page
         * cache.  Make inodes shrinkable regardless of cache state.
         */
        if (IS_ENABLED(CONFIG_HIGHMEM))
                return true;

        /* Cache completely empty?  Shrink away. */
        head = rcu_access_pointer(mapping->i_pages.xa_head);
        if (!head)
                return true;

        /*
         * The xarray stores single offset-0 entries directly in the
         * head pointer, which allows non-resident page cache entries
         * to escape the shadow shrinker's list of xarray nodes.  The
         * inode shrinker needs to pick them up under memory pressure.
         */
        if (!xa_is_node(head) && xa_is_value(head))
                return true;

        return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO = 0,             /* IO error on async write */
        AS_ENOSPC = 1,          /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE = 3,     /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING = 4,         /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_RELEASE_ALWAYS = 6,  /* Call ->release_folio(), even if no private data */
        AS_STABLE_WRITES = 7,   /* must wait for writeback before modifying
                                   folio contents */
        AS_INACCESSIBLE = 8,    /* Do not attempt direct R/W access to the mapping */
        AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
        AS_KERNEL_FILE = 10,    /* mapping for a fake kernel file that shouldn't
                                   account usage to user cgroups */
        /* Bits 16-25 are used for FOLIO_ORDER */
        AS_FOLIO_ORDER_BITS = 5,
        AS_FOLIO_ORDER_MIN = 16,
        AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};

#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
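
/*
 * Example (illustrative, not part of this header): a filesystem's writeback
 * completion handler would typically report a failed write like this:
 *
 *	static void example_writepage_end_io(struct bio *bio)
 *	{
 *		struct folio *folio = bio_first_folio_all(bio);
 *
 *		if (bio->bi_status)
 *			mapping_set_error(folio->mapping,
 *					  blk_status_to_errno(bio->bi_status));
 *		folio_end_writeback(folio);
 *	}
 *
 * "example_writepage_end_io" is a hypothetical name; the helpers it calls
 * (bio_first_folio_all(), blk_status_to_errno(), folio_end_writeback())
 * are existing kernel interfaces.
 */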

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(const struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(const struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
        return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
        set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
        clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
        return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
        set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
        clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_inaccessible(struct address_space *mapping)
{
        /*
         * It's expected inaccessible mappings are also unevictable.  Compaction
         * migrate scanner (isolate_migratepages_block()) relies on this to
         * reduce page locking.
         */
        set_bit(AS_UNEVICTABLE, &mapping->flags);
        set_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline bool mapping_inaccessible(const struct address_space *mapping)
{
        return test_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
{
        set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}

static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
{
        return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
                                           gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size.  I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PREFERRED_MAX_PAGECACHE_ORDER   HPAGE_PMD_ORDER
#else
#define PREFERRED_MAX_PAGECACHE_ORDER   8
#endif

/*
 * xas_split_alloc() does not support arbitrary orders.  This implies no
 * 512MB THP on ARM64 with 64KB base page size.
 */
#define MAX_XAS_ORDER           (XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER     min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)

/*
 * mapping_max_folio_size_supported() - Check the max folio size supported
 *
 * The filesystem should call this function at mount time if there is a
 * requirement on the folio mapping size in the page cache.
 */
static inline size_t mapping_max_folio_size_supported(void)
{
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
        return PAGE_SIZE;
}

/*
 * mapping_set_folio_order_range() - Set the orders supported by a file.
 * @mapping: The address space of the file.
 * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
 * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
 *
 * The filesystem should call this function in its inode constructor to
 * indicate which base size (min) and maximum size (max) of folio the VFS
 * can use to cache the contents of the file.  This should only be used
 * if the filesystem needs special handling of folio sizes (ie there is
 * something the core cannot know).
 * Do not tune it based on, eg, i_size.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_folio_order_range(struct address_space *mapping,
                                                 unsigned int min,
                                                 unsigned int max)
{
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return;

        if (min > MAX_PAGECACHE_ORDER)
                min = MAX_PAGECACHE_ORDER;

        if (max > MAX_PAGECACHE_ORDER)
                max = MAX_PAGECACHE_ORDER;

        if (max < min)
                max = min;

        mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
                (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
}

static inline void mapping_set_folio_min_order(struct address_space *mapping,
                                               unsigned int min)
{
        mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The address space of the file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
        mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
}
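
/*
 * Example (illustrative, not part of this header): a filesystem that caches
 * data in arbitrary-order folios would typically opt in while setting up a
 * new inode, e.g. in the path that instantiates it:
 *
 *	static struct inode *example_iget(struct super_block *sb, ...)
 *	{
 *		struct inode *inode = iget_locked(sb, ...);
 *
 *		...
 *		mapping_set_large_folios(inode->i_mapping);
 *		return inode;
 *	}
 *
 * "example_iget" is a hypothetical name.  A filesystem whose block size is
 * larger than PAGE_SIZE would instead call mapping_set_folio_min_order()
 * with the matching minimum order.
 */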

static inline unsigned int
mapping_max_folio_order(const struct address_space *mapping)
{
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return 0;
        return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
}

static inline unsigned int
mapping_min_folio_order(const struct address_space *mapping)
{
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return 0;
        return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
}

static inline unsigned long
mapping_min_folio_nrpages(const struct address_space *mapping)
{
        return 1UL << mapping_min_folio_order(mapping);
}

static inline unsigned long
mapping_min_folio_nrbytes(const struct address_space *mapping)
{
        return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
}

/**
 * mapping_align_index() - Align index for this mapping.
 * @mapping: The address_space.
 * @index: The page index.
 *
 * The index of a folio must be naturally aligned.  If you are adding a
 * new folio to the page cache and need to know what index to give it,
 * call this function.
 */
static inline pgoff_t mapping_align_index(const struct address_space *mapping,
                                          pgoff_t index)
{
        return round_down(index, mapping_min_folio_nrpages(mapping));
}
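
/*
 * Example (illustrative): with a minimum folio order of 2 (16KiB folios on a
 * 4KiB page size), mapping_min_folio_nrpages() is 4, so index 13 is rounded
 * down to 12:
 *
 *	pgoff_t index = mapping_align_index(mapping, 13);	\* index == 12 *\
 *
 * A folio added at that position must then cover at least indices 12-15.
 */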

/*
 * Large folio support currently depends on THP.  These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
        /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
        VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
                     "Anonymous mapping always supports large folio");

        return mapping_max_folio_order(mapping) > 0;
}

/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
        return PAGE_SIZE << mapping_max_folio_order(mapping);
}

static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        return atomic_read(&mapping->nr_thps);
#else
        return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_inc(&mapping->nr_thps);
#else
        WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_dec(&mapping->nr_thps);
#else
        WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *folio_mapping(const struct folio *folio);

/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Anonymous folios return NULL, even if they're in
 * the swap cache.  Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code.  If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return NULL;

        return folio_mapping(folio);
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
        return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
        folio_get(folio);
        folio->private = data;
        folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
        void *old = folio_get_private(folio);

        folio->private = data;
        return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
        void *data = folio_get_private(folio);

        if (!folio_test_private(folio))
                return NULL;
        folio_clear_private(folio);
        folio->private = NULL;
        folio_put(folio);

        return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
        folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
        return folio_detach_private(page_folio(page));
}
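
/*
 * Example (illustrative): a filesystem that hangs per-folio state off
 * folio->private pairs the attach with a later detach, e.g.:
 *
 *	struct example_state *state = kzalloc(sizeof(*state), GFP_NOFS);
 *
 *	folio_attach_private(folio, state);
 *	...
 *	state = folio_detach_private(folio);
 *	kfree(state);
 *
 * "struct example_state" is a hypothetical type; the point is only that
 * every successful folio_attach_private() must eventually be balanced by
 * folio_detach_private() so the folio reference taken here is dropped.
 */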

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
        return folio_alloc_noprof(gfp, order);
}
#endif

#define filemap_alloc_folio(...)                                \
        alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return &filemap_alloc_folio(gfp, 0)->page;
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio().  For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list.  The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache.  If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_DONTCACHE - Uncached buffered IO
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED            ((__force fgf_t)0x00000001)
#define FGP_LOCK                ((__force fgf_t)0x00000002)
#define FGP_CREAT               ((__force fgf_t)0x00000004)
#define FGP_WRITE               ((__force fgf_t)0x00000008)
#define FGP_NOFS                ((__force fgf_t)0x00000010)
#define FGP_NOWAIT              ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP            ((__force fgf_t)0x00000040)
#define FGP_STABLE              ((__force fgf_t)0x00000080)
#define FGP_DONTCACHE           ((__force fgf_t)0x00000100)
#define FGF_GET_ORDER(fgf)      (((__force unsigned)fgf) >> 26) /* top 6 bits */

#define FGP_WRITEBEGIN          (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

static inline unsigned int filemap_get_order(size_t size)
{
        unsigned int shift = ilog2(size);

        if (shift <= PAGE_SHIFT)
                return 0;

        return shift - PAGE_SHIFT;
}

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created.  If there is already a folio at
 * the index, it will be returned, no matter what its size.  If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
        unsigned int order = filemap_get_order(size);

        if (!order)
                return 0;
        return (__force fgf_t)(order << 26);
}
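
/*
 * Example (illustrative): asking __filemap_get_folio() for a folio large
 * enough to cover a 64KiB write starting at byte offset "pos" might look
 * like:
 *
 *	struct folio *folio;
 *
 *	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
 *			FGP_WRITEBEGIN | fgf_set_order(SZ_64K),
 *			mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *
 * The order is only a hint; the returned folio may be smaller, and an
 * existing smaller folio at that index is returned unchanged.
 */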

void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
                                  fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
                                fgf_t fgp_flags, gfp_t gfp);

/**
 * write_begin_get_folio - Get folio for write_begin with flags.
 * @iocb: The kiocb passed from write_begin (may be NULL).
 * @mapping: The address space to search.
 * @index: The page cache index.
 * @len: Length of data being written.
 *
 * This is a helper for filesystem write_begin() implementations.
 * It wraps __filemap_get_folio(), setting appropriate flags in
 * the write begin context.
 *
 * Return: A folio or an ERR_PTR.
 */
static inline struct folio *write_begin_get_folio(const struct kiocb *iocb,
                struct address_space *mapping, pgoff_t index, size_t len)
{
        fgf_t fgp_flags = FGP_WRITEBEGIN;

        fgp_flags |= fgf_set_order(len);

        if (iocb && iocb->ki_flags & IOCB_DONTCACHE)
                fgp_flags |= FGP_DONTCACHE;

        return __filemap_get_folio(mapping, index, fgp_flags,
                                   mapping_gfp_mask(mapping));
}

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
                                              pgoff_t index)
{
        return __filemap_get_folio(mapping, index, 0, 0);
}
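
/*
 * Example (illustrative): unlike the older page-based lookups below, the
 * folio lookups return an ERR_PTR() rather than NULL, so callers check
 * with IS_ERR():
 *
 *	struct folio *folio = filemap_get_folio(mapping, index);
 *
 *	if (IS_ERR(folio))
 *		goto not_cached;	\* no folio at this index *\
 *	...
 *	folio_put(folio);		\* drop the lookup's reference *\
 */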

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
                                               pgoff_t index)
{
        return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index.  If no folio is found,
 * a new folio is created.  The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio.  ERR_PTR(-ENOMEM) if no folio is found
 * and failed to create a folio.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
                                               pgoff_t index)
{
        return __filemap_get_folio(mapping, index,
                                   FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                   mapping_gfp_mask(mapping));
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                         pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                               pgoff_t offset, fgf_t fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                          pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                               pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                  gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Returns the locked page at the given index in the given cache, creating it
 * if needed, but does not wait if the page is locked or if memory would have
 * to be reclaimed.  This is intended for speculative data generators, where
 * the data can be regenerated if the page couldn't be grabbed.  This routine
 * should be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                                  pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                                  mapping_gfp_mask(mapping));
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(const struct folio *folio)
{
        return folio->index + folio_nr_pages(folio);
}

/**
 * folio_next_pos - Get the file position of the next folio.
 * @folio: The current folio.
 *
 * Return: The position of the folio which follows this folio in the file.
 */
static inline loff_t folio_next_pos(const struct folio *folio)
{
        return (loff_t)folio_next_index(folio) << PAGE_SHIFT;
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
        return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the folio locked and ensure
 * e.g., shmem did not move this folio to the swap cache.
 * Return: true or false.
 */
static inline bool folio_contains(const struct folio *folio, pgoff_t index)
{
        VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
        return index - folio->index < folio_nr_pages(folio);
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
                pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
unsigned filemap_get_folios_dirty(struct address_space *mapping,
                pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
                filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
                gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
                filler_t *filler, struct file *file);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, struct file *file)
{
        return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
                                               pgoff_t index, struct file *file)
{
        return read_cache_folio(mapping, index, NULL, file);
}

/**
 * page_pgoff - Calculate the logical page offset of this page.
 * @folio: The folio containing this page.
 * @page: The page which we need the offset of.
 *
 * For file pages, this is the offset from the beginning of the file
 * in units of PAGE_SIZE.  For anonymous pages, this is the offset from
 * the beginning of the anon_vma in units of PAGE_SIZE.  This will
 * return nonsense for KSM pages.
 *
 * Context: Caller must have a reference on the folio or otherwise
 * prevent it from being split or freed.
 *
 * Return: The offset in units of PAGE_SIZE.
 */
static inline pgoff_t page_pgoff(const struct folio *folio,
                                 const struct page *page)
{
        return folio->index + folio_page_idx(folio, page);
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(const struct folio *folio)
{
        return ((loff_t)folio->index) * PAGE_SIZE;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        struct folio *folio = page_folio(page);

        return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 */
static inline pgoff_t folio_pgoff(const struct folio *folio)
{
        return folio->index;
}

static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
                                        const unsigned long address)
{
        pgoff_t pgoff;

        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

struct wait_page_key {
        struct folio *folio;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct folio *folio;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                   struct wait_page_key *key)
{
        if (wait_page->folio != key->folio)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order).  Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
        return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline bool trylock_page(struct page *page)
{
        return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should.  It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap.  It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary).  Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep.  If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space.  If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                __folio_lock(folio);
}
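
/*
 * Example (illustrative): a common pattern after a page-cache lookup is to
 * take the folio lock and then re-check that the folio was not truncated
 * while we waited:
 *
 *	folio_lock(folio);
 *	if (folio->mapping != mapping) {	\* truncated underneath us *\
 *		folio_unlock(folio);
 *		folio_put(folio);
 *		goto retry;
 *	}
 *	...
 *	folio_unlock(folio);
 *
 * Holding the lock keeps folio->mapping stable, as described above.
 */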

/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep.  Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
        struct folio *folio;
        might_sleep();

        folio = page_folio(page);
        if (!folio_trylock(folio))
                __folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                return __folio_lock_killable(folio);
        return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
                                             struct vm_fault *vmf)
{
        might_sleep();
        if (!folio_trylock(folio))
                return __folio_lock_or_retry(folio, vmf);
        return 0;
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
        if (folio_test_locked(folio))
                folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
        if (!folio_test_locked(folio))
                return 0;
        return folio_wait_bit_killable(folio, PG_locked);
}

void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void folio_end_writeback_no_dropbehind(struct folio *folio);
void folio_end_dropbehind(struct folio *folio);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
        /* Avoid atomic ops, locking, etc. when not actually needed. */
        if (folio_test_dirty(folio))
                __folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
                struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
                struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
                loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.  Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
                                                 loff_t start_byte,
                                                 loff_t end_byte)
{
        if (!mapping->nrpages)
                return false;
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
            !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                return false;
        return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
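
/*
 * Example (illustrative): an IOCB_NOWAIT direct-I/O path can use this to
 * bail out rather than block on writeback:
 *
 *	if (iocb->ki_flags & IOCB_NOWAIT) {
 *		if (filemap_range_needs_writeback(mapping, pos,
 *						  pos + count - 1))
 *			return -EAGAIN;
 *	} else {
 *		ret = filemap_write_and_wait_range(mapping, pos,
 *						   pos + count - 1);
 *		if (ret)
 *			return ret;
 *	}
 */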

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_folio() or
 * __readahead_batch() in a loop and attempt to start reads into each
 * folio in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
        struct file_ra_state *ra;
        /* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
        bool dropbehind;
        bool _workingset;
        unsigned long _pflags;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)                             \
        struct readahead_control ractl = {                              \
                .file = f,                                              \
                .mapping = m,                                           \
                .ra = r,                                                \
                ._index = i,                                            \
        }

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
                unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
                      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio which triggered the readahead call.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                struct folio *folio, unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
        page_cache_async_ra(&ractl, folio, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio;

        BUG_ON(ractl->_batch_count > ractl->_nr_pages);
        ractl->_nr_pages -= ractl->_batch_count;
        ractl->_index += ractl->_batch_count;

        if (!ractl->_nr_pages) {
                ractl->_batch_count = 0;
                return NULL;
        }

        folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        ractl->_batch_count = folio_nr_pages(folio);

        return folio;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio = __readahead_folio(ractl);

        if (folio)
                folio_put(folio);
        return folio;
}
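
/*
 * Example (illustrative): a simple ->readahead implementation consumes the
 * request one folio at a time, as the struct documentation above suggests:
 *
 *	static void example_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)) != NULL) {
 *			... start read I/O; on completion the I/O path
 *			    calls folio_end_read(folio, success) ...
 *		}
 *	}
 *
 * "example_readahead" is a hypothetical name for a filesystem's
 * address_space_operations ->readahead callback.
 */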

static inline unsigned int __readahead_batch(struct readahead_control *rac,
                struct page **array, unsigned int array_sz)
{
        unsigned int i = 0;
        XA_STATE(xas, &rac->mapping->i_pages, 0);
        struct folio *folio;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;
        rac->_batch_count = 0;

        xas_set(&xas, rac->_index);
        rcu_read_lock();
        xas_for_each(&xas, folio, rac->_index + rac->_nr_pages - 1) {
                if (xas_retry(&xas, folio))
                        continue;
                VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
                array[i++] = folio_page(folio, 0);
                rac->_batch_count += folio_nr_pages(folio);
                if (i == array_sz)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(const struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(const struct readahead_control *rac)
{
        return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(const struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(const struct readahead_control *rac)
{
        return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(const struct readahead_control *rac)
{
        return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(const struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
               PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
                                                   const struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        size_t offset = offset_in_folio(folio, size);

        if (!folio->mapping)
                return -EFAULT;

        /* folio is wholly inside EOF */
        if (folio_next_index(folio) - 1 < index)
                return folio_size(folio);
        /* folio is wholly past EOF */
        if (folio->index > index || !offset)
                return -EFAULT;
        /* folio is partially inside EOF */
        return offset;
}
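
/*
 * Example (illustrative): a ->page_mkwrite handler typically locks the folio
 * and then uses this check to decide whether the fault is still valid:
 *
 *	folio_lock(folio);
 *	ret = folio_mkwrite_check_truncate(folio, inode);
 *	if (ret < 0) {
 *		folio_unlock(folio);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	... "ret" bytes of the folio are within i_size and may be dirtied ...
 */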

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(const struct inode *inode,
                                const struct folio *folio)
{
        return folio_size(folio) >> inode->i_blkbits;
}
#endif /* _LINUX_PAGEMAP_H */