// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application.  Readahead only ever
 * attempts to read folios that are not yet in the page cache.  If a
 * folio is present but not up-to-date, readahead will not try to read
 * it.  In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
 * the page cache, or that it is in the page cache and has the
 * readahead flag set.  This flag indicates that the folio was read
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly a synchronous read and partly async
 * readahead.  This is reflected in the struct file_ra_state which
 * contains ->size being the total number of pages, and ->async_size
 * which is the number of pages in the async section.  The readahead
 * flag will be set on the first folio in this async section to trigger
 * a subsequent readahead.  Once a series of sequential reads has been
 * established, there should be no need for a synchronous component and
 * all readahead requests will be fully asynchronous.
 *
 * When either of the triggers causes a readahead, three numbers need
 * to be determined: the start of the region to read, the size of the
 * region, and the size of the async tail.
 *
 * The start of the region is simply the first page address at or after
 * the accessed address which is not currently populated in the page
 * cache.  This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero - then zero is used.  NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages.  This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved.  Specifically: where the readahead
 * was triggered by the readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead.  In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read.  This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of file.
 *
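 * As a concrete illustration of these three numbers (the values here
 * are chosen arbitrarily for this example, not computed by any one
 * heuristic): suppose an application reads at page 10 and the
 * heuristics settle on a 16-page region with a 12-page async tail.
 * Pages 10-25 are read, and the readahead flag is set on page 14
 * (start + size - async_size).  When the application later reaches
 * page 14, that access triggers the next, fully asynchronous,
 * readahead.
 *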
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential.  There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation, based on the previous readahead size,
 * determines the size of the readahead, to which any requested read
 * size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation.  ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
 * error.  The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_folio() repeatedly to get
 * each folio from those prepared for readahead.  It may fail to read a
 * folio by:
 *
 * * not calling readahead_folio() sufficiently many times, effectively
 *   ignoring some folios, as might be appropriate if the path to
 *   storage is congested.
 *
 * * failing to actually submit a read request for a given folio,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the folio should be unlocked by the filesystem
 * to indicate that the read attempt has failed.  In the first case the
 * folio will be unlocked by the VFS.
 *
 * Those folios not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g. memory or indexing information) to
 * become available.  Folios in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * In this case it is best to use filemap_remove_folio() to remove the
 * folios from the page cache as is automatically done for folios that
 * were not fetched with readahead_folio().  This will allow a
 * subsequent synchronous readahead request to try them again.  If they
 * are left in the page cache, then they will be read individually using
 * ->read_folio() which may be less efficient.
 */

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/readahead.h>

#include "internal.h"

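/*
 * Editorial sketch of the ->readahead() contract described in the
 * overview above.  This is illustrative only, not kernel code:
 * "myfs_readahead" and "myfs_submit_read" are hypothetical names.
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(rac)) != NULL) {
 *			// Unlock on failure to signal that the read
 *			// attempt for this folio has failed.
 *			if (myfs_submit_read(rac->file, folio) < 0)
 *				folio_unlock(folio);
 *		}
 *	}
 *
 * Any folios for which readahead_folio() is never called are cleaned
 * up (removed from the page cache and unlocked) by read_pages() below.
 */
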
/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

static void read_pages(struct readahead_control *rac)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct folio *folio;
	struct blk_plug plug;

	if (!readahead_count(rac))
		return;

	if (unlikely(rac->_workingset))
		psi_memstall_enter(&rac->_pflags);
	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining folios. */
		while ((folio = readahead_folio(rac)) != NULL) {
			folio_get(folio);
			filemap_remove_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
		}
	} else {
		while ((folio = readahead_folio(rac)) != NULL)
			aops->read_folio(rac->file, folio);
	}

	blk_finish_plug(&plug);
	if (unlikely(rac->_workingset))
		psi_memstall_leave(&rac->_pflags);
	rac->_workingset = false;

	BUG_ON(readahead_count(rac));
}

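/*
 * Editorial note on the cleanup loop in read_pages() above: each folio
 * that ->readahead() left unconsumed is still locked and held only by
 * the page cache.  The extra folio_get() keeps the folio alive across
 * filemap_remove_folio() (which drops the page cache's reference) so
 * that it can still be safely unlocked before the final folio_put().
 */
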
static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
				       gfp_t gfp_mask, unsigned int order)
{
	struct folio *folio;

	folio = filemap_alloc_folio(gfp_mask, order, NULL);
	if (folio && ractl->dropbehind)
		__folio_set_dropbehind(folio);

	return folio;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller, and ractl->mapping->invalidate_lock
 * must be held by the caller at least in shared mode.  Mutexes may be held by
 * caller.  May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long mark = ULONG_MAX, i = 0;
	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	lockdep_assert_held(&mapping->invalidate_lock);

	trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read,
				      lookahead_size);
	index = mapping_align_index(mapping, index);

	/*
	 * As iterator `i` is aligned to min_nrpages, round_up the
	 * difference between nr_to_read and lookahead_size to mark the
	 * index that only has lookahead or "async_region" to set the
	 * readahead flag.
	 */
	if (lookahead_size <= nr_to_read) {
		unsigned long ra_folio_index;

		ra_folio_index = round_up(readahead_index(ractl) +
					  nr_to_read - lookahead_size,
					  min_nrpages);
		mark = ra_folio_index - index;
	}
	nr_to_read += readahead_index(ractl) - index;
	ractl->_index = index;

	/*
	 * Preallocate as many pages as we will need.
	 */
	while (i < nr_to_read) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);
		int ret;

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}

		folio = ractl_alloc_folio(ractl, gfp_mask,
					  mapping_min_folio_order(mapping));
		if (!folio)
			break;

		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			if (ret == -ENOMEM)
				break;
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}
		if (i == mark)
			folio_set_readahead(folio);
		ractl->_workingset |= folio_test_workingset(folio);
		ractl->_nr_pages += min_nrpages;
		i += min_nrpages;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the folio is not
	 * uptodate then the caller will launch read_folio again, and
	 * will then handle the error.
	 */
	read_pages(ractl);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
static void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(mapping->host);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	filemap_invalidate_lock_shared(mapping);
	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
	filemap_invalidate_unlock_shared(mapping);
}

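/*
 * Worked example of the EOF clamp above (editorial, assuming 4KiB
 * pages): for isize = 10000, end_index = (10000 - 1) >> 12 = 2, so a
 * request for index 1, nr_to_read 8 is trimmed to 2 pages (indexes 1
 * and 2), and a request starting at index 3 is dropped entirely.
 */
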
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		do_page_cache_ra(ractl, this_chunk, 0);

		nr_to_read -= this_chunk;
	}
}

/*
 * Set the initial window size: round the request up to the next power
 * of 2, then scale it up - x 4 for small sizes, x 2 for medium, capped
 * at max for large.  For a 128k (32 page) max ra:
 * 1-2 page = 16k, 3-4 page = 32k, 5-8 page = 64k, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

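/*
 * Editorial example of the ramp-up performed by get_init_ra_size() and
 * get_next_ra_size(), assuming max = 32 pages (128KiB of 4KiB pages):
 * a first 1-page read gets an initial window of 4 pages; subsequent
 * hits then grow the window 4 -> 8 -> 16 -> 32, where it stays capped
 * at max.
 */
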
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator.  The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = ractl_alloc_folio(ractl, gfp, order);

	if (!folio)
		return -ENOMEM;
	mark = round_down(mark, 1UL << order);
	if (index == mark)
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}

	ractl->_nr_pages += 1UL << order;
	ractl->_workingset |= folio_test_workingset(folio);
	return 0;
}

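/*
 * Editorial note on the mark rounding in ra_alloc_folio() above: with
 * order-2 folios, a mark of 13 is rounded down to 12, so the readahead
 * flag lands on the folio covering indexes 12-15 rather than being
 * missed because no folio starts at exactly index 13.
 */
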
void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t start = readahead_index(ractl);
	pgoff_t index = start;
	unsigned int min_order = mapping_min_folio_order(mapping);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);
	unsigned int new_order = ra->order;

	trace_page_cache_ra_order(mapping->host, start, ra);
	if (!mapping_large_folio_support(mapping)) {
		ra->order = 0;
		goto fallback;
	}

	limit = min(limit, index + ra->size - 1);

	new_order = min(mapping_max_folio_order(mapping), new_order);
	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
	new_order = max(new_order, min_order);

	ra->order = new_order;

	/* See comment in page_cache_ra_unbounded() */
	nofs = memalloc_nofs_save();
	filemap_invalidate_lock_shared(mapping);
	/*
	 * If the new_order is greater than min_order and index is
	 * already aligned to new_order, then this will be a noop as index
	 * aligned to new_order should also be aligned to min_order.
	 */
	ractl->_index = mapping_align_index(mapping, index);
	index = readahead_index(ractl);

	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);
		/* Don't allocate pages past EOF */
		while (order > min_order && index + (1UL << order) - 1 > limit)
			order--;
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation below.
	 */
	if (!err)
		return;
fallback:
	/*
	 * ->readahead() may have updated readahead window size so we have to
	 * check there's still something to read.
	 */
	if (ra->size > index - start)
		do_page_cache_ra(ractl, ra->size - (index - start),
				 ra->async_size);
}

static unsigned long ractl_max_pages(struct readahead_control *ractl,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	unsigned long max_pages = ractl->ra->ra_pages;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);
	return max_pages;
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	pgoff_t index = readahead_index(ractl);
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages, contig_count;
	pgoff_t prev_index, miss;

	trace_page_cache_sync_ra(ractl->mapping->host, index, ra, req_count);
	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range.  The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	max_pages = ractl_max_pages(ractl, req_count);
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	/*
	 * A start of file, oversized read, or sequential cache miss:
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
		ra->start = index;
		ra->size = get_init_ra_size(req_count, max_pages);
		ra->async_size = ra->size > req_count ? ra->size - req_count :
							ra->size >> 1;
		goto readit;
	}

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	rcu_read_lock();
	miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
	rcu_read_unlock();
	contig_count = index - miss - 1;
	/*
	 * Standalone, small random read.  Read as is, and do not pollute the
	 * readahead state.
	 */
	if (contig_count <= req_count) {
		do_page_cache_ra(ractl, req_count, 0);
		return;
	}
	/*
	 * File cached from the beginning:
	 * it is a strong indication of a long-run stream (or whole-file read)
	 */
	if (miss == ULONG_MAX)
		contig_count *= 2;
	ra->start = index;
	ra->size = min(contig_count + req_count, max_pages);
	ra->async_size = 1;
readit:
	ra->order = 0;
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

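/*
 * Editorial example of the interleaved-stream heuristic above: with
 * index = 100, req_count = 2 and pages 90-99 already cached (page 89
 * absent), page_cache_prev_miss() returns 89, giving contig_count = 10.
 * Since 10 > 2, this is treated as a sequential stream and the new
 * window becomes min(10 + 2, max_pages) pages starting at index 100.
 */
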
void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	unsigned long max_pages;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t index = readahead_index(ractl);
	pgoff_t expected, start, end, aligned_end, align;

	/* no readahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.  On a folio
	 * under writeback the bit means PG_reclaim, so it must not be
	 * interpreted as a readahead marker here.
	 */
	if (folio_test_writeback(folio))
		return;

	trace_page_cache_async_ra(ractl->mapping->host, index, ra, req_count);
	folio_clear_readahead(folio);

	if (blk_cgroup_congested())
		return;

	max_pages = ractl_max_pages(ractl, req_count);
	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			      folio_nr_pages(folio));
	if (index == expected) {
		ra->start += ra->size;
		/*
		 * In the case of MADV_HUGEPAGE, the actual size might exceed
		 * the readahead window.
		 */
		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
		goto readit;
	}

	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	rcu_read_lock();
	start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
	rcu_read_unlock();

	if (!start || start - index > max_pages)
		return;

	ra->start = start;
	ra->size = start - index;	/* old async_size */
	ra->size += req_count;
	ra->size = get_next_ra_size(ra, max_pages);
readit:
	ra->order += 2;
	align = 1UL << min(ra->order, ffs(max_pages) - 1);
	end = ra->start + ra->size;
	aligned_end = round_down(end, align);
	if (aligned_end > ra->start)
		ra->size -= end - aligned_end;
	ra->async_size = ra->size;
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	struct file *file;
	const struct inode *inode;

	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	file = fd_file(f);
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead.  If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	if (!file->f_mapping)
		return -EINVAL;
	if (!file->f_mapping->a_ops)
		return -EINVAL;

	inode = file_inode(file);
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		return -EINVAL;
	if (IS_ANON_FILE(inode))
		return -EINVAL;

	return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
{
	return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
}
#endif

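/*
 * Editorial userspace example of the syscall implemented above, which
 * is equivalent to posix_fadvise(fd, offset, count, POSIX_FADV_WILLNEED):
 *
 *	if (readahead(fd, 0, 1024 * 1024) != 0)
 *		perror("readahead");
 *
 * The call returns 0 on success; the -EBADF and -EINVAL cases are as
 * described in ksys_readahead() above.
 */
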
/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
	unsigned int min_order = mapping_min_folio_order(mapping);

	new_index = new_start / PAGE_SIZE;
	/*
	 * Readahead code should have aligned the ractl->_index to
	 * min_nrpages before calling readahead aops.
	 */
	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
		    !ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		ractl->_index = folio->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
		    !ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		if (ra) {
			ra->size += min_nrpages;
			ra->async_size += min_nrpages;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
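
/*
 * Editorial sketch of readahead_expand() use from a ->readahead()
 * implementation: a hypothetical filesystem that decompresses 64KiB
 * blocks could round the request out to block boundaries before
 * submitting I/O.  The caller must re-check readahead_pos() and
 * readahead_length() afterwards, as the expansion may stop short of
 * what was requested:
 *
 *	loff_t start = round_down(readahead_pos(rac), SZ_64K);
 *	size_t len = round_up(readahead_pos(rac) +
 *			      readahead_length(rac), SZ_64K) - start;
 *
 *	readahead_expand(rac, start, len);
 */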