// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application.  Readahead only ever
 * attempts to read folios that are not yet in the page cache.  If a
 * folio is present but not up-to-date, readahead will not try to read
 * it.  In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
 * the page cache, or that it is in the page cache and has the
 * readahead flag set.  This flag indicates that the folio was read
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly synchronous read, and partly async
 * readahead.  This is reflected in the struct file_ra_state which
 * contains ->size being the total number of pages, and ->async_size
 * which is the number of pages in the async section.  The readahead
 * flag will be set on the first folio in this async section to trigger
 * a subsequent readahead.  Once a series of sequential reads has been
 * established, there should be no need for a synchronous component and
 * all readahead requests will be fully asynchronous.
 *
 * When either of the triggers causes a readahead, three numbers need
 * to be determined: the start of the region to read, the size of the
 * region, and the size of the async tail.
 *
 * The start of the region is simply the first page address at or after
 * the accessed address which is not currently populated in the page
 * cache.  This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero - then zero is used.  NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages.  This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved.  Specifically: where the readahead
 * was triggered by the readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead.  In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read.  This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of file.
 *
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential.  There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation, based on the previous readahead size,
 * determines the size of the readahead, to which any requested read
 * size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation.  ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
 * error.  The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_folio() repeatedly to get
 * each folio from those prepared for readahead.  It may fail to read a
 * folio by:
 *
 * * not calling readahead_folio() sufficiently many times, effectively
 *   ignoring some folios, as might be appropriate if the path to
 *   storage is congested.
 *
 * * failing to actually submit a read request for a given folio,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the folio should be unlocked by the filesystem
 * to indicate that the read attempt has failed.  In the first case the
 * folio will be unlocked by the VFS.
 *
 * Those folios not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g. memory or indexing information) to
 * become available.  Folios in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * In this case it is best to use filemap_remove_folio() to remove the
 * folios from the page cache as is automatically done for folios that
 * were not fetched with readahead_folio().  This will allow a
 * subsequent synchronous readahead request to try them again.  If they
 * are left in the page cache, then they will be read individually using
 * ->read_folio() which may be less efficient.
 */
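
/*
 * Illustrative sketch (not part of this file): a minimal ->readahead()
 * implementation following the contract above.  The "myfs_" names are
 * hypothetical; readahead_folio() and folio_unlock() are the real APIs.
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct folio *folio;
 *
 *		// Consume every folio the VFS prepared for this request.
 *		while ((folio = readahead_folio(rac)) != NULL) {
 *			// Hypothetical helper: queue an async read of the folio.
 *			if (myfs_submit_read_folio(folio) < 0)
 *				// A failed submission must unlock the folio so
 *				// the VFS can fall back to ->read_folio() later.
 *				folio_unlock(folio);
 *		}
 *	}
 */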

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
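
/*
 * Typical call site (sketch, for illustration): the VFS owns the
 * struct file_ra_state embedded in struct file as ->f_ra, zeroes it
 * when the file object is allocated, and then initialises it on open:
 *
 *	memset(&file->f_ra, 0, sizeof(file->f_ra));
 *	file_ra_state_init(&file->f_ra, file->f_mapping);
 */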

static void read_pages(struct readahead_control *rac)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct folio *folio;
	struct blk_plug plug;

	if (!readahead_count(rac))
		return;

	if (unlikely(rac->_workingset))
		psi_memstall_enter(&rac->_pflags);
	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining folios. */
		while ((folio = readahead_folio(rac)) != NULL) {
			folio_get(folio);
			filemap_remove_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
		}
	} else {
		while ((folio = readahead_folio(rac)) != NULL)
			aops->read_folio(rac->file, folio);
	}

	blk_finish_plug(&plug);
	if (unlikely(rac->_workingset))
		psi_memstall_leave(&rac->_pflags);
	rac->_workingset = false;

	BUG_ON(readahead_count(rac));
}

static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
				       gfp_t gfp_mask, unsigned int order)
{
	struct folio *folio;

	folio = filemap_alloc_folio(gfp_mask, order);
	if (folio && ractl->dropbehind)
		__folio_set_dropbehind(folio);

	return folio;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long mark = ULONG_MAX, i = 0;
	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	index = mapping_align_index(mapping, index);

	/*
	 * As the iterator `i` is aligned to min_nrpages, round up the
	 * first index of the lookahead (or "async") region so that the
	 * readahead flag is set on a folio boundary.
	 */
	if (lookahead_size <= nr_to_read) {
		unsigned long ra_folio_index;

		ra_folio_index = round_up(readahead_index(ractl) +
					  nr_to_read - lookahead_size,
					  min_nrpages);
		mark = ra_folio_index - index;
	}
	nr_to_read += readahead_index(ractl) - index;
	ractl->_index = index;

	/*
	 * Preallocate as many pages as we will need.
	 */
	while (i < nr_to_read) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);
		int ret;

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}

		folio = ractl_alloc_folio(ractl, gfp_mask,
					mapping_min_folio_order(mapping));
		if (!folio)
			break;

		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			if (ret == -ENOMEM)
				break;
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}
		if (i == mark)
			folio_set_readahead(folio);
		ractl->_workingset |= folio_test_workingset(folio);
		ractl->_nr_pages += min_nrpages;
		i += min_nrpages;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the folio is not
	 * uptodate then the caller will launch read_folio again, and
	 * will then handle the error.
	 */
	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
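
/*
 * Example call (sketch, for illustration): a filesystem wanting to
 * pre-read pages beyond i_size could set up a control structure with
 * the real DEFINE_READAHEAD() helper and then, say, read 16 pages
 * while asking for the next readahead to trigger 8 pages before the
 * end of the window:
 *
 *	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 *	page_cache_ra_unbounded(&ractl, 16, 8);
 */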

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
static void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size.
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		do_page_cache_ra(ractl, this_chunk, 0);

		nr_to_read -= this_chunk;
	}
}
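
/*
 * Worked example (with 4KiB pages, so 2MiB == 512 pages): a forced
 * readahead of 1200 pages that survives the max_pages clamp is issued
 * as three calls to do_page_cache_ra() for 512, 512 and 176 pages
 * respectively, so no more than 2MiB of freshly allocated, locked
 * pages is outstanding at any one time.
 */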

/*
 * Set the initial window size, rounding the request up to the next power
 * of 2: quadrupled for small sizes, doubled for medium, and clamped to
 * max for large.  For a 128k (32 page) max ra this gives:
 * 1-2 page = 16k, 3-4 page = 32k, 5-8 page = 64k, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
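
/*
 * Worked example (max = 32 pages, i.e. a 128k window with 4KiB pages):
 * a 1 page request rounds to 1, and 1 <= 32/32, so the initial window
 * is 4 pages (16k); a 3 page request rounds to 4, and 4 <= 32/4, so
 * the window is 8 pages (32k); a 9 page request rounds to 16, which
 * falls through to the clamp and yields the full 32 pages (128k).
 */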

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
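
/*
 * Worked example (max = 32 pages): a window of 1 page is below 32/16
 * and quadruples to 4; after that each step doubles while cur <= 16,
 * giving the ramp 1 -> 4 -> 8 -> 16 -> 32, where it pins at max.
 */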

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumes all
 * readahead pages and stalls on the missing page at readahead_index;
 * instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */
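
/*
 * Illustrative trace (one page reads, ra_pages = 32, all pages cold):
 * the first access at page 0 sets up start=0 size=4 async_size=3 with
 * PG_readahead on page 1; hitting that mark advances the window to
 * start=4 size=8 async_size=8, then start=12 size=16, then start=28
 * size=32, after which the window stays pinned at max while the reads
 * remain sequential.
 */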

static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = ractl_alloc_folio(ractl, gfp, order);

	if (!folio)
		return -ENOMEM;
	mark = round_down(mark, 1UL << order);
	if (index == mark)
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}

	ractl->_nr_pages += 1UL << order;
	ractl->_workingset |= folio_test_workingset(folio);
	return 0;
}

void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned int new_order)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t start = readahead_index(ractl);
	pgoff_t index = start;
	unsigned int min_order = mapping_min_folio_order(mapping);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);
	unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));

	/*
	 * Fall back when size < min_nrpages, as each folio should be
	 * at least min_nrpages anyway.
	 */
	if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size)
		goto fallback;

	limit = min(limit, index + ra->size - 1);

	if (new_order < mapping_max_folio_order(mapping))
		new_order += 2;

	new_order = min(mapping_max_folio_order(mapping), new_order);
	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
	new_order = max(new_order, min_order);

	/* See comment in page_cache_ra_unbounded() */
	nofs = memalloc_nofs_save();
	filemap_invalidate_lock_shared(mapping);
	/*
	 * If the new_order is greater than min_order and index is
	 * already aligned to new_order, then this will be a no-op, as
	 * an index aligned to new_order is also aligned to min_order.
	 */
	ractl->_index = mapping_align_index(mapping, index);
	index = readahead_index(ractl);

	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);
		/* Don't allocate pages past EOF */
		while (order > min_order && index + (1UL << order) - 1 > limit)
			order--;
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation below.
	 */
	if (!err)
		return;
fallback:
	/*
	 * ->readahead() may have updated the readahead window size so we
	 * have to check there's still something to read.
	 */
	if (ra->size > index - start)
		do_page_cache_ra(ractl, ra->size - (index - start),
				 ra->async_size);
}
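
/*
 * Illustrative note on the order ramp: page_cache_async_ra() passes in
 * the order of the folio that carried PG_readahead, and the "+= 2"
 * above grows it by a factor of four per round.  Starting from
 * order-0 folios, successive sequential readaheads would allocate
 * order 2, 4, 6, ... folios, bounded by ilog2(ra->size), the mapping's
 * maximum folio order, and EOF.
 */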

static unsigned long ractl_max_pages(struct readahead_control *ractl,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	unsigned long max_pages = ractl->ra->ra_pages;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size.
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);
	return max_pages;
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	pgoff_t index = readahead_index(ractl);
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages, contig_count;
	pgoff_t prev_index, miss;

	/*
	 * If we have pre-content watches we need to disable readahead to
	 * make sure that we don't find zero-filled pages in the cache that
	 * we never emitted events for.  Filesystems supporting HSM must
	 * make sure not to call this function with ractl->file unset for
	 * files handled by HSM.
	 */
	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
		return;

	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range.  The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	max_pages = ractl_max_pages(ractl, req_count);
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	/*
	 * A start of file, oversized read, or sequential cache miss:
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
		ra->start = index;
		ra->size = get_init_ra_size(req_count, max_pages);
		ra->async_size = ra->size > req_count ? ra->size - req_count :
							ra->size >> 1;
		goto readit;
	}

	/*
	 * Query the page cache and look for the traces (cached history
	 * pages) that a sequential stream would leave behind.
	 */
	rcu_read_lock();
	miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
	rcu_read_unlock();
	contig_count = index - miss - 1;
	/*
	 * Standalone, small random read.  Read as is, and do not pollute
	 * the readahead state.
	 */
	if (contig_count <= req_count) {
		do_page_cache_ra(ractl, req_count, 0);
		return;
	}
	/*
	 * File cached from the beginning:
	 * it is a strong indication of a long-run stream (or whole-file read).
	 */
	if (miss == ULONG_MAX)
		contig_count *= 2;
	ra->start = index;
	ra->size = min(contig_count + req_count, max_pages);
	ra->async_size = 1;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, 0);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
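
/*
 * Worked example of the cache-history heuristic above: a read of 4
 * pages at index 100 with pages 90-99 already cached finds miss = 89,
 * so contig_count = 100 - 89 - 1 = 10.  Since 10 > 4 this looks like
 * an interleaved sequential stream, and the new window becomes
 * min(10 + 4, max_pages) pages starting at index 100, with
 * async_size = 1.
 */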

void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	unsigned long max_pages;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t index = readahead_index(ractl);
	pgoff_t expected, start;
	unsigned int order = folio_order(folio);

	/* no readahead */
	if (!ra->ra_pages)
		return;

	/* See the comment in page_cache_sync_ra. */
	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
		return;

	/*
	 * The same bit is used for PG_readahead and PG_reclaim, so a
	 * folio under writeback cannot be trusted to carry the readahead
	 * flag.
	 */
	if (folio_test_writeback(folio))
		return;

	folio_clear_readahead(folio);

	if (blk_cgroup_congested())
		return;

	max_pages = ractl_max_pages(ractl, req_count);
	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			1UL << order);
	if (index == expected) {
		ra->start += ra->size;
		/*
		 * In the case of MADV_HUGEPAGE, the actual size might exceed
		 * the readahead window.
		 */
		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	rcu_read_lock();
	start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
	rcu_read_unlock();

	if (!start || start - index > max_pages)
		return;

	ra->start = start;
	ra->size = start - index;	/* old async_size */
	ra->size += req_count;
	ra->size = get_next_ra_size(ra, max_pages);
	ra->async_size = ra->size;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, order);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);
704 
705 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
706 {
707 	CLASS(fd, f)(fd);
708 
709 	if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ))
710 		return -EBADF;
711 
712 	/*
713 	 * The readahead() syscall is intended to run only on files
714 	 * that can execute readahead. If readahead is not possible
715 	 * on this file, then we must return -EINVAL.
716 	 */
717 	if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops ||
718 	    (!S_ISREG(file_inode(fd_file(f))->i_mode) &&
719 	    !S_ISBLK(file_inode(fd_file(f))->i_mode)))
720 		return -EINVAL;
721 
722 	return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
723 }
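
/*
 * Userspace view (sketch): the function above backs the readahead(2)
 * system call, so an application can prefetch, e.g., the first 2MiB of
 * a file it is about to read sequentially:
 *
 *	if (readahead(fd, 0, 2 * 1024 * 1024) < 0)
 *		perror("readahead");
 */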

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
{
	return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
}
#endif

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards to the specified size
 * by inserting locked pages before and after the current window.  This
 * may involve the insertion of THPs, in which case the window may get
 * expanded even beyond what was requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in
 * the pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object
 * for a different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
	unsigned int min_order = mapping_min_folio_order(mapping);

	new_index = new_start / PAGE_SIZE;
	/*
	 * Readahead code should have aligned the ractl->_index to
	 * min_nrpages before calling readahead aops.
	 */
	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		ractl->_index = folio->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		if (ra) {
			ra->size += min_nrpages;
			ra->async_size += min_nrpages;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
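
/*
 * Example use (sketch, for illustration): a network filesystem whose
 * ->readahead() wants whole 256KiB cache blocks could expand the
 * request to block boundaries before submitting I/O.  readahead_pos()
 * and readahead_length() are real helpers; the rounding policy is the
 * assumption here:
 *
 *	loff_t start = round_down(readahead_pos(ractl), SZ_256K);
 *	loff_t end = round_up(readahead_pos(ractl) +
 *			      readahead_length(ractl), SZ_256K);
 *
 *	readahead_expand(ractl, start, end - start);
 *	// On return the window may be smaller than asked for, so the
 *	// caller must re-read readahead_pos()/readahead_length().
 */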
831