/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                        pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode))
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
                pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
                loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
                             loff_t start, loff_t end);

static inline int filemap_fdatawait(struct address_space *mapping)
{
        return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
                loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
                loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
                loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
                           struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
        return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
        /* Fastpath for common case of no error */
        if (unlikely(err))
                __filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
                                        errseq_t since)
{
        return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
        return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
        return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}
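/*
 * Illustrative sketch (not part of this header): the sample/check pair
 * above is how per-file writeback error reporting is typically wired up.
 * A file's open path samples the mapping's errseq_t into f_wb_err, and a
 * later fsync-style path checks whether an error has been recorded since
 * that sample. The names my_open/my_fsync_err are hypothetical.
 */
#if 0	/* example only */
static int my_open(struct inode *inode, struct file *file)
{
        /* Remember the error state at open time. */
        file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
        return 0;
}

static int my_fsync_err(struct file *file)
{
        /* Report an error only if one occurred since our sample. */
        return filemap_check_wb_err(file->f_mapping, file->f_wb_err);
}
#endif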
/*
 * Flush file data before changing attributes. Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
        inode_dio_wait(inode);
        return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
        return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
        void *head;

        /*
         * On highmem systems, there could be lowmem pressure from the
         * inodes before there is highmem pressure from the page
         * cache. Make inodes shrinkable regardless of cache state.
         */
        if (IS_ENABLED(CONFIG_HIGHMEM))
                return true;

        /* Cache completely empty? Shrink away. */
        head = rcu_access_pointer(mapping->i_pages.xa_head);
        if (!head)
                return true;

        /*
         * The xarray stores single offset-0 entries directly in the
         * head pointer, which allows non-resident page cache entries
         * to escape the shadow shrinker's list of xarray nodes. The
         * inode shrinker needs to pick them up under memory pressure.
         */
        if (!xa_is_node(head) && xa_is_value(head))
                return true;

        return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_LARGE_FOLIO_SUPPORT = 6,
        AS_RELEASE_ALWAYS,      /* Call ->release_folio(), even if no private data */
        AS_STABLE_WRITES,       /* must wait for writeback before modifying
                                   folio contents */
        AS_UNMOVABLE,           /* The mapping cannot be moved, ever */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
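/*
 * Illustrative sketch (not part of this header): a typical place to call
 * mapping_set_error() is a filesystem's writeback completion handler,
 * once it knows whether the I/O succeeded. my_writepage_end_io is a
 * hypothetical bio end_io callback.
 */
#if 0	/* example only */
static void my_writepage_end_io(struct bio *bio)
{
        struct folio_iter fi;
        int err = blk_status_to_errno(bio->bi_status);

        bio_for_each_folio_all(fi, bio) {
                if (err)
                        mapping_set_error(fi.folio->mapping, err);
                folio_end_writeback(fi.folio);
        }
        bio_put(bio);
}
#endif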
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
        return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
        set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
        clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
        return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
        set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
        clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_unmovable(struct address_space *mapping)
{
        /*
         * It's expected that unmovable mappings are also unevictable.
         * Compaction's migrate scanner (isolate_migratepages_block())
         * relies on this to reduce page locking.
         */
        set_bit(AS_UNEVICTABLE, &mapping->flags);
        set_bit(AS_UNMOVABLE, &mapping->flags);
}

static inline bool mapping_unmovable(struct address_space *mapping)
{
        return test_bit(AS_UNMOVABLE, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}
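/*
 * Illustrative sketch (not part of this header): when allocating on
 * behalf of a mapping, the caller's gfp flags are typically narrowed to
 * what the mapping allows. my_alloc_for_mapping is hypothetical.
 */
#if 0	/* example only */
static struct folio *my_alloc_for_mapping(struct address_space *mapping)
{
        /* Drop any rights the mapping does not grant (e.g. __GFP_FS). */
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

        return folio_alloc(gfp, 0);
}
#endif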
/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size. I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER     HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER     8
#endif

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
        __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/*
 * Large folio support currently depends on THP. These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
        /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
        VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
                        "Anonymous mapping always supports large folio");

        return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
                test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(struct address_space *mapping)
{
        if (mapping_large_folio_support(mapping))
                return PAGE_SIZE << MAX_PAGECACHE_ORDER;
        return PAGE_SIZE;
}
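/*
 * Illustrative sketch (not part of this header): a filesystem that can
 * handle arbitrary-order folios opts in from its inode setup path,
 * before the inode is visible to the VFS. my_setup_inode is hypothetical.
 */
#if 0	/* example only */
static void my_setup_inode(struct inode *inode)
{
        mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE);
        mapping_set_large_folios(inode->i_mapping);
}
#endif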
static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        return atomic_read(&mapping->nr_thps);
#else
        return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_inc(&mapping->nr_thps);
#else
        WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
        if (!mapping_large_folio_support(mapping))
                atomic_dec(&mapping->nr_thps);
#else
        WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored. This is different
 * from the mapping returned by folio_mapping(). The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return swapcache_mapping(folio);

        return folio->mapping;
}

/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Anonymous folios return NULL, even if they're in
 * the swap cache. Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code. If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return NULL;

        return folio_mapping(folio);
}

static inline struct address_space *page_file_mapping(struct page *page)
{
        return folio_file_mapping(page_folio(page));
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
        return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
        folio_get(folio);
        folio->private = data;
        folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data. The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
        void *old = folio_get_private(folio);

        folio->private = data;
        return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
        void *data = folio_get_private(folio);

        if (!folio_test_private(folio))
                return NULL;
        folio_clear_private(folio);
        folio->private = NULL;
        folio_put(folio);

        return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
        folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
        return folio_detach_private(page_folio(page));
}
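/*
 * Illustrative sketch (not part of this header): filesystems commonly
 * hang per-folio state (here a hypothetical struct my_folio_state) off
 * folio->private around I/O, then detach and free it on release.
 */
#if 0	/* example only */
struct my_folio_state {
        unsigned long io_pending;
};

static int my_begin_io(struct folio *folio)
{
        struct my_folio_state *state = kzalloc(sizeof(*state), GFP_NOFS);

        if (!state)
                return -ENOMEM;
        /* Takes a folio reference; dropped again by folio_detach_private(). */
        folio_attach_private(folio, state);
        return 0;
}

static void my_release(struct folio *folio)
{
        kfree(folio_detach_private(folio));
}
#endif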
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
        return folio_alloc_noprof(gfp, order);
}
#endif

#define filemap_alloc_folio(...)                                \
        alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return &filemap_alloc_folio(gfp, 0)->page;
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio(). For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list. The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache. If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED            ((__force fgf_t)0x00000001)
#define FGP_LOCK                ((__force fgf_t)0x00000002)
#define FGP_CREAT               ((__force fgf_t)0x00000004)
#define FGP_WRITE               ((__force fgf_t)0x00000008)
#define FGP_NOFS                ((__force fgf_t)0x00000010)
#define FGP_NOWAIT              ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP            ((__force fgf_t)0x00000040)
#define FGP_STABLE              ((__force fgf_t)0x00000080)
#define FGF_GET_ORDER(fgf)      (((__force unsigned)fgf) >> 26)	/* top 6 bits */

#define FGP_WRITEBEGIN          (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created. If there is already a folio at
 * the index, it will be returned, no matter what its size. If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
        unsigned int shift = ilog2(size);

        if (shift <= PAGE_SHIFT)
                return 0;
        return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}
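/*
 * Illustrative sketch (not part of this header): a write_begin()-style
 * path can combine FGP_WRITEBEGIN with fgf_set_order() to ask for a
 * folio large enough to cover the whole write. my_write_begin is
 * hypothetical; pos/len are the byte range being written.
 */
#if 0	/* example only */
static struct folio *my_write_begin(struct address_space *mapping,
                loff_t pos, size_t len)
{
        fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);

        /* May return ERR_PTR(-ENOMEM); the folio comes back locked. */
        return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
                        mapping_gfp_mask(mapping));
}
#endif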
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
                fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
                fgf_t fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
                                        pgoff_t index)
{
        return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
                                        pgoff_t index)
{
        return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index. If no folio is found,
 * a new folio is created. The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio, or ERR_PTR(-ENOMEM) if a folio was not
 * found and could not be created.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
                                        pgoff_t index)
{
        return __filemap_get_folio(mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                        mapping_gfp_mask(mapping));
}
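/*
 * Illustrative sketch (not part of this header): unlike the older page
 * APIs, these folio lookups return ERR_PTR() values rather than NULL,
 * so callers test with IS_ERR(). my_peek_uptodate is hypothetical.
 */
#if 0	/* example only */
static bool my_peek_uptodate(struct address_space *mapping, pgoff_t index)
{
        struct folio *folio = filemap_get_folio(mapping, index);
        bool uptodate;

        if (IS_ERR(folio))
                return false;
        uptodate = folio_test_uptodate(folio);
        folio_put(folio);
        return uptodate;
}
#endif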
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, fgf_t fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}
#define swapcache_index(folio)  __page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to. If you know
 * the folio is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
        if (unlikely(folio_test_swapcache(folio)))
                return swapcache_index(folio);
        return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
        return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
        return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the folio locked in order to prevent
 * (eg) shmem from moving the folio between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
        return index - folio_index(folio) < folio_nr_pages(folio);
}
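/*
 * Illustrative sketch (not part of this header): folio_next_index() is
 * the usual way to advance an index while walking a file range, since
 * a large folio may span many pages. my_walk_range is hypothetical.
 */
#if 0	/* example only */
static void my_walk_range(struct address_space *mapping,
                pgoff_t index, pgoff_t end)
{
        while (index < end) {
                struct folio *folio = filemap_get_folio(mapping, index);

                if (IS_ERR(folio))
                        break;
                /* ... operate on the folio ... */
                index = folio_next_index(folio);
                folio_put(folio);
        }
}
#endif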
/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
        /* HugeTLBfs wants the head page regardless */
        if (PageHuge(head))
                return head;

        return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
                pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
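/*
 * Illustrative sketch (not part of this header): the filemap_get_folios*
 * functions fill a folio_batch and advance *start, so they are called in
 * a loop until they return 0. my_scan_range is hypothetical.
 */
#if 0	/* example only */
static void my_scan_range(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &start, end, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        /* ... operate on the folio ... */
                }
                /* Drops the references taken by filemap_get_folios(). */
                folio_batch_release(&fbatch);
                cond_resched();
        }
}
#endif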
struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                        pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
                filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
                gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
                filler_t *filler, struct file *file);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, struct file *file)
{
        return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
                                pgoff_t index, struct file *file)
{
        return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        struct page *head;

        if (likely(!PageTransTail(page)))
                return page->index;

        head = compound_head(page);
        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        return head->index + page - head;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
        return page_offset(&folio->page);
}

/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
        return page_file_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
        return folio->index;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
struct wait_page_key {
        struct folio *folio;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct folio *folio;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                  struct wait_page_key *key)
{
        if (wait_page->folio != key->folio)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order). Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
        return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline bool trylock_page(struct page *page)
{
        return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should. It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap. It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary). Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep. If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space. If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                __folio_lock(folio);
}

/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep. Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
        struct folio *folio;
        might_sleep();

        folio = page_folio(page);
        if (!folio_trylock(folio))
                __folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
        might_sleep();
        if (!folio_trylock(folio))
                return __folio_lock_killable(folio);
        return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
                                             struct vm_fault *vmf)
{
        might_sleep();
        if (!folio_trylock(folio))
                return __folio_lock_or_retry(folio, vmf);
        return 0;
}
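/*
 * Illustrative sketch (not part of this header): the lock-ordering rule
 * documented for folio_lock() means two folios of the same file must be
 * locked in ascending index order. my_lock_pair is hypothetical.
 */
#if 0	/* example only */
static void my_lock_pair(struct folio *a, struct folio *b)
{
        /* Same address_space: lower index first to avoid ABBA deadlock. */
        if (b->index < a->index)
                swap(a, b);
        folio_lock(a);
        folio_lock(b);
}
#endif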
/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
        if (folio_test_locked(folio))
                folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
        if (!folio_test_locked(folio))
                return 0;
        return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
        folio_wait_locked(page_folio(page));
}

void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
        /* Avoid atomic ops, locking, etc. when not actually needed. */
        if (folio_test_dirty(folio))
                __folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
                struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a folio's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
                                 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback. Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
                                                 loff_t start_byte,
                                                 loff_t end_byte)
{
        if (!mapping->nrpages)
                return false;
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
            !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                return false;
        return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
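/*
 * Illustrative sketch (not part of this header): an IOCB_NOWAIT direct
 * write bails out with -EAGAIN rather than flushing the page cache
 * synchronously. my_dio_write_check is hypothetical.
 */
#if 0	/* example only */
static int my_dio_write_check(struct kiocb *iocb, size_t count)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        loff_t end = iocb->ki_pos + count - 1;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (filemap_range_needs_writeback(mapping, iocb->ki_pos, end))
                        return -EAGAIN;
                return 0;
        }
        return filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
}
#endif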
/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages. Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state. May be NULL.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
        struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
        bool _workingset;
        unsigned long _pflags;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)                     \
        struct readahead_control ractl = {                      \
                .file = f,                                      \
                .mapping = m,                                   \
                .ra = r,                                        \
                ._index = i,                                    \
        }

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
                unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
                      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read. The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file, pgoff_t index,
                unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_sync_ra(&ractl, req_count);
}
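/*
 * Illustrative sketch (not part of this header): a buffered read path
 * kicks off synchronous readahead when the folio it wants is missing,
 * then looks the folio up again. my_read_folio_at is hypothetical.
 */
#if 0	/* example only */
static struct folio *my_read_folio_at(struct file *file, pgoff_t index,
                unsigned long nr_to_read)
{
        struct address_space *mapping = file->f_mapping;
        struct folio *folio = filemap_get_folio(mapping, index);

        if (IS_ERR(folio)) {
                /* Cache miss: submit the read, with readahead. */
                page_cache_sync_readahead(mapping, &file->f_ra, file,
                                          index, nr_to_read);
                folio = filemap_get_folio(mapping, index);
        }
        return folio;
}
#endif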
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
                struct file_ra_state *ra, struct file *file,
                struct folio *folio, pgoff_t index, unsigned long req_count)
{
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
        page_cache_async_ra(&ractl, folio, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio;

        BUG_ON(ractl->_batch_count > ractl->_nr_pages);
        ractl->_nr_pages -= ractl->_batch_count;
        ractl->_index += ractl->_batch_count;

        if (!ractl->_nr_pages) {
                ractl->_batch_count = 0;
                return NULL;
        }

        folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        ractl->_batch_count = folio_nr_pages(folio);

        return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
        struct folio *folio = __readahead_folio(ractl);

        return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked. The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
        struct folio *folio = __readahead_folio(ractl);

        if (folio)
                folio_put(folio);
        return folio;
}
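/*
 * Illustrative sketch (not part of this header): a minimal ->readahead
 * implementation walks the request with readahead_folio() and starts
 * I/O against each folio. my_readahead and my_submit_read are
 * hypothetical.
 */
#if 0	/* example only */
static void my_readahead(struct readahead_control *ractl)
{
        struct folio *folio;

        while ((folio = readahead_folio(ractl))) {
                /*
                 * readahead_folio() has already dropped the refcount;
                 * the (hypothetical) I/O completion path unlocks the
                 * folio, e.g. via folio_end_read().
                 */
                my_submit_read(folio, folio_pos(folio), folio_size(folio));
        }
}
#endif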
static inline unsigned int __readahead_batch(struct readahead_control *rac,
                struct page **array, unsigned int array_sz)
{
        unsigned int i = 0;
        XA_STATE(xas, &rac->mapping->i_pages, 0);
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;
        rac->_batch_count = 0;

        xas_set(&xas, rac->_index);
        rcu_read_lock();
        xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
                if (xas_retry(&xas, page))
                        continue;
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
                rac->_batch_count += thp_nr_pages(page);
                if (i == array_sz)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array. 0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)                        \
        __readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
        return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
        return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
        return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        size_t offset = offset_in_folio(folio, size);

        if (!folio->mapping)
                return -EFAULT;

        /* folio is wholly inside EOF */
        if (folio_next_index(folio) - 1 < index)
                return folio_size(folio);
        /* folio is wholly past EOF */
        if (folio->index > index || !offset)
                return -EFAULT;
        /* folio is partially inside EOF */
        return offset;
}
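/*
 * Illustrative sketch (not part of this header): a ->page_mkwrite
 * handler locks the faulting folio, then uses the check above to see
 * whether a concurrent truncate has made the fault moot.
 * my_page_mkwrite is hypothetical.
 */
#if 0	/* example only */
static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
        struct folio *folio = page_folio(vmf->page);
        struct inode *inode = file_inode(vmf->vma->vm_file);

        folio_lock(folio);
        if (folio_mkwrite_check_truncate(folio, inode) < 0) {
                folio_unlock(folio);
                return VM_FAULT_NOPAGE;
        }
        folio_mark_dirty(folio);
        /* Return with the folio locked; the fault path unlocks it. */
        return VM_FAULT_LOCKED;
}
#endif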
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        int offset = offset_in_page(size);

        if (page->mapping != inode->i_mapping)
                return -EFAULT;

        /* page is wholly inside EOF */
        if (page->index < index)
                return PAGE_SIZE;
        /* page is wholly past EOF */
        if (page->index > index || !offset)
                return -EFAULT;
        /* page is partially inside EOF */
        return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
        return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
        return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */