/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
			     loff_t start, loff_t end);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

/*
 * Flush file data before changing attributes.  Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache. Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes. The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}
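/*
 * Illustrative sketch (not part of this header): how the errseq_t helpers
 * above are typically combined.  A caller samples the error cursor, starts
 * and waits for writeback, then checks whether any error was recorded since
 * the sample.  The function name is hypothetical.
 *
 *	static int myfs_sync_mapping(struct address_space *mapping)
 *	{
 *		errseq_t since = filemap_sample_wb_err(mapping);
 *		int err;
 *
 *		err = filemap_write_and_wait(mapping);
 *		if (err)
 *			return err;
 *		return filemap_check_wb_err(mapping, since);
 *	}
 */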
/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_LARGE_FOLIO_SUPPORT = 6,
	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
	AS_STABLE_WRITES,	/* must wait for writeback before modifying
				   folio contents */
	AS_UNMOVABLE,		/* The mapping cannot be moved, ever */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
	return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
	set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
	clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_unmovable(struct address_space *mapping)
{
	/*
	 * It's expected unmovable mappings are also unevictable. Compaction
	 * migrate scanner (isolate_migratepages_block()) relies on this to
	 * reduce page locking.
	 */
	set_bit(AS_UNEVICTABLE, &mapping->flags);
	set_bit(AS_UNMOVABLE, &mapping->flags);
}

static inline bool mapping_unmovable(struct address_space *mapping)
{
	return test_bit(AS_UNMOVABLE, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
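/*
 * Illustrative sketch (not part of this header): an asynchronous writeback
 * completion handler typically records a failure with mapping_set_error()
 * so that a later fsync() on the file reports it.  The handler name and
 * its surrounding I/O machinery are hypothetical.
 *
 *	static void myfs_writepage_done(struct folio *folio, int error)
 *	{
 *		if (error)
 *			mapping_set_error(folio->mapping, error);
 *		folio_end_writeback(folio);
 *	}
 */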
/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size.  I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER	8
#endif

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/*
 * Large folio support currently depends on THP.  These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(struct address_space *mapping)
{
	if (mapping_large_folio_support(mapping))
		return PAGE_SIZE << MAX_PAGECACHE_ORDER;
	return PAGE_SIZE;
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored.  This is different
 * from the mapping returned by folio_mapping().  The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}
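/*
 * Illustrative sketch (not part of this header): a filesystem opts a file
 * into large folios from its inode-initialisation path, before the inode
 * is visible to other threads.  The function and a_ops names are
 * hypothetical.
 *
 *	static void myfs_setup_inode(struct inode *inode)
 *	{
 *		inode->i_mapping->a_ops = &myfs_aops;
 *		mapping_set_large_folios(inode->i_mapping);
 *	}
 */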
/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Anonymous folios return NULL, even if they're in
 * the swap cache.  Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code.  If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return NULL;

	return folio_mapping(folio);
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}
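/*
 * Illustrative sketch (not part of this header): the usual lifecycle of
 * per-folio private data.  A filesystem attaches its own bookkeeping
 * structure when it starts tracking a folio and detaches and frees it from
 * its ->release_folio() path.  "struct myfs_folio_state" and the function
 * names are hypothetical.
 *
 *	static void myfs_track_folio(struct folio *folio,
 *				     struct myfs_folio_state *state)
 *	{
 *		folio_attach_private(folio, state);
 *	}
 *
 *	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		struct myfs_folio_state *state = folio_detach_private(folio);
 *
 *		kfree(state);
 *		return true;
 *	}
 */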
static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
	return folio_alloc_noprof(gfp, order);
}
#endif

#define filemap_alloc_folio(...)				\
	alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio().  For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list.  The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache.  If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED		((__force fgf_t)0x00000001)
#define FGP_LOCK		((__force fgf_t)0x00000002)
#define FGP_CREAT		((__force fgf_t)0x00000004)
#define FGP_WRITE		((__force fgf_t)0x00000008)
#define FGP_NOFS		((__force fgf_t)0x00000010)
#define FGP_NOWAIT		((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP		((__force fgf_t)0x00000040)
#define FGP_STABLE		((__force fgf_t)0x00000080)
#define FGF_GET_ORDER(fgf)	(((__force unsigned)fgf) >> 26)	/* top 6 bits */

#define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created.  If there is already a folio at
 * the index, it will be returned, no matter what its size.  If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
	unsigned int shift = ilog2(size);

	if (shift <= PAGE_SHIFT)
		return 0;
	return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}

void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index. If no folio is found,
 * a new folio is created. The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio.  ERR_PTR(-ENOMEM) if the folio was not
 * found and could not be created.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
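/*
 * Illustrative sketch (not part of this header): how a buffered write path
 * might combine FGP_WRITEBEGIN with fgf_set_order() to ask
 * __filemap_get_folio() for a folio large enough to cover the write.  The
 * function name and the pos/len parameters are hypothetical.
 *
 *	static struct folio *myfs_get_write_folio(struct address_space *mapping,
 *						  loff_t pos, size_t len)
 *	{
 *		fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);
 *
 *		return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
 *					   mapping_gfp_mask(mapping));
 *	}
 */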
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, fgf_t fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

#define swapcache_index(folio)	__page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
		gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}
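/*
 * Illustrative sketch (not part of this header): walking a range of the
 * page cache with filemap_get_folios().  The folio_batch helpers come from
 * <linux/pagevec.h>; the function name and the per-folio work are
 * hypothetical.
 *
 *	static void myfs_scan_range(struct address_space *mapping,
 *				    pgoff_t start, pgoff_t end)
 *	{
 *		struct folio_batch fbatch;
 *		unsigned int i;
 *
 *		folio_batch_init(&fbatch);
 *		while (filemap_get_folios(mapping, &start, end, &fbatch)) {
 *			for (i = 0; i < folio_batch_count(&fbatch); i++) {
 *				struct folio *folio = fbatch.folios[i];
 *
 *				... inspect the folio ...
 *			}
 *			folio_batch_release(&fbatch);
 *			cond_resched();
 *		}
 *	}
 */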
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}

/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
	return page_file_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
	return folio->index;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order).  Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline bool trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should.  It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap.  It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary).  Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep.  If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space.  If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep.  Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
					     struct vm_fault *vmf)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_or_retry(folio, vmf);
	return 0;
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}
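/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * working on a folio that is already in the cache.  filemap_lock_folio()
 * returns the folio locked with a reference held; the caller drops both
 * when it is done.  The function name and the per-folio work are
 * hypothetical.
 *
 *	static int myfs_touch_index(struct address_space *mapping, pgoff_t index)
 *	{
 *		struct folio *folio = filemap_lock_folio(mapping, index);
 *
 *		if (IS_ERR(folio))
 *			return PTR_ERR(folio);
 *		... operate on the locked folio ...
 *		folio_unlock(folio);
 *		folio_put(folio);
 *		return 0;
 *	}
 */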
static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);
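/*
 * Illustrative sketch (not part of this header): the usual fault-in/retry
 * shape for user buffers.  A caller that copies from userspace with page
 * faults disabled (for example while holding a folio lock) backs off,
 * faults the buffer in with fault_in_readable(), and retries.
 * fault_in_readable() returns the number of bytes that could not be
 * faulted in; everything else here is hypothetical and heavily simplified.
 *
 *	again:
 *		... attempt the copy with page faults disabled ...
 *		if (the copy came up short) {
 *			if (fault_in_readable(ubuf, len) == len)
 *				return -EFAULT;	(nothing could be faulted in)
 *			goto again;
 *		}
 */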
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.  Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
						 loff_t start_byte,
						 loff_t end_byte)
{
	if (!mapping->nrpages)
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
	bool _workingset;
	unsigned long _pflags;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct folio *folio, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, folio, req_count);
}
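/*
 * Illustrative sketch (not part of this header): how a read path typically
 * drives the two readahead entry points.  On a cache miss it submits
 * synchronous readahead; when it finds a folio marked for readahead it
 * kicks off asynchronous readahead for the window ahead.  Names and the
 * surrounding loop are hypothetical.
 *
 *	folio = filemap_get_folio(mapping, index);
 *	if (IS_ERR(folio)) {
 *		page_cache_sync_readahead(mapping, ra, file, index, nr_to_read);
 *		folio = filemap_get_folio(mapping, index);
 *	} else if (folio_test_readahead(folio)) {
 *		page_cache_async_readahead(mapping, ra, file, folio, index,
 *					   nr_to_read);
 *	}
 */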
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);
		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once each page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}
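/*
 * Illustrative sketch (not part of this header): the shape of a minimal
 * ->readahead implementation that consumes the request one folio at a time
 * with readahead_folio().  Each folio arrives locked; the filesystem starts
 * I/O and unlocks it from its completion path (folio_end_read(), or
 * folio_unlock() after marking it uptodate).  The names are hypothetical.
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)) != NULL)
 *			myfs_start_read(folio_inode(folio), folio);
 *	}
 */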
/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}
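/*
 * Illustrative sketch (not part of this header): using
 * folio_mkwrite_check_truncate() from a ->page_mkwrite handler.  The folio
 * must be locked so the result cannot be invalidated by a concurrent
 * truncate.  The handler name and the omitted write-enable work are
 * hypothetical.
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		ssize_t len;
 *
 *		folio_lock(folio);
 *		len = folio_mkwrite_check_truncate(folio, inode);
 *		if (len < 0) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		... dirty the first len bytes of the folio ...
 *		return VM_FAULT_LOCKED;
 *	}
 */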
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */