// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application.  Readahead only ever
 * attempts to read folios that are not yet in the page cache.  If a
 * folio is present but not up-to-date, readahead will not try to read
 * it. In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
 * the page cache, or that it is in the page cache and has the
 * readahead flag set.  This flag indicates that the folio was read
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly synchronous read, and partly async
 * readahead.  This is reflected in the struct file_ra_state which
 * contains ->size being the total number of pages, and ->async_size
 * which is the number of pages in the async section.  The readahead
 * flag will be set on the first folio in this async section to trigger
 * a subsequent readahead.  Once a series of sequential reads has been
 * established, there should be no need for a synchronous component and
 * all readahead requests will be fully asynchronous.
 *
 * When either of the triggers causes a readahead, three numbers need
 * to be determined: the start of the region to read, the size of the
 * region, and the size of the async tail.
 *
 * The start of the region is simply the first page address at or after
 * the accessed address, which is not currently populated in the page
 * cache.  This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero - then zero is used.  NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages.  This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved.  Specifically: where the readahead
 * was triggered by the readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead.  In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read.  This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of the file.
 *
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential.  There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation, based on the previous readahead size,
 * determines the size of the readahead, to which any requested read
 * size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation.  ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
 * error.  The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_folio() repeatedly to get
 * each folio from those prepared for readahead.  It may fail to read a
 * folio by:
 *
 * * not calling readahead_folio() sufficiently many times, effectively
 *   ignoring some folios, as might be appropriate if the path to
 *   storage is congested.
 *
 * * failing to actually submit a read request for a given folio,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the folio should be unlocked by the filesystem
 * to indicate that the read attempt has failed.  In the first case the
 * folio will be unlocked by the VFS.
 *
 * Those folios not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g.  memory or indexing information) to
 * become available.  Folios in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * In this case it is best to use filemap_remove_folio() to remove the
 * folios from the page cache as is automatically done for folios that
 * were not fetched with readahead_folio().  This will allow a
 * subsequent synchronous readahead request to try them again.  If they
 * are left in the page cache, then they will be read individually using
 * ->read_folio() which may be less efficient.
 */
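
/*
 * Illustrative sketch (not from this file): a minimal ->readahead()
 * implementation following the contract above.  myfs_submit_read_async()
 * is a hypothetical helper that queues an asynchronous read and returns
 * a negative errno on failure; on success, read completion is assumed
 * to unlock the folio.
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(rac)) != NULL) {
 *			// On submission failure, unlock the folio to
 *			// signal that this read attempt failed; the
 *			// caller then falls back to ->read_folio().
 *			if (myfs_submit_read_async(rac->file, folio) < 0)
 *				folio_unlock(folio);
 *		}
 *	}
 */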

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
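
/*
 * Illustrative usage (a sketch, not from this file): the caller is
 * expected to zero the state before initialising it, e.g. for an
 * already-allocated "ra" and the file's "inode":
 *
 *	memset(ra, 0, sizeof(*ra));
 *	file_ra_state_init(ra, inode->i_mapping);
 */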

static void read_pages(struct readahead_control *rac)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct folio *folio;
	struct blk_plug plug;

	if (!readahead_count(rac))
		return;

	if (unlikely(rac->_workingset))
		psi_memstall_enter(&rac->_pflags);
	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/*
		 * Clean up the remaining folios.  The sizes in ->ra
		 * may be used to size the next readahead, so make sure
		 * they accurately reflect what happened.
		 */
		while ((folio = readahead_folio(rac)) != NULL) {
			unsigned long nr = folio_nr_pages(folio);

			folio_get(folio);
			rac->ra->size -= nr;
			if (rac->ra->async_size >= nr) {
				rac->ra->async_size -= nr;
				filemap_remove_folio(folio);
			}
			folio_unlock(folio);
			folio_put(folio);
		}
	} else {
		while ((folio = readahead_folio(rac)) != NULL)
			aops->read_folio(rac->file, folio);
	}

	blk_finish_plug(&plug);
	if (unlikely(rac->_workingset))
		psi_memstall_leave(&rac->_pflags);
	rac->_workingset = false;

	BUG_ON(readahead_count(rac));
}
191 
192 /**
193  * page_cache_ra_unbounded - Start unchecked readahead.
194  * @ractl: Readahead control.
195  * @nr_to_read: The number of pages to read.
196  * @lookahead_size: Where to start the next readahead.
197  *
198  * This function is for filesystems to call when they want to start
199  * readahead beyond a file's stated i_size.  This is almost certainly
200  * not the function you want to call.  Use page_cache_async_readahead()
201  * or page_cache_sync_readahead() instead.
202  *
203  * Context: File is referenced by caller.  Mutexes may be held by caller.
204  * May sleep, but will not reenter filesystem to reclaim memory.
205  */
206 void page_cache_ra_unbounded(struct readahead_control *ractl,
207 		unsigned long nr_to_read, unsigned long lookahead_size)
208 {
209 	struct address_space *mapping = ractl->mapping;
210 	unsigned long index = readahead_index(ractl);
211 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
212 	unsigned long mark = ULONG_MAX, i = 0;
213 	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
214 
215 	/*
216 	 * Partway through the readahead operation, we will have added
217 	 * locked pages to the page cache, but will not yet have submitted
218 	 * them for I/O.  Adding another page may need to allocate memory,
219 	 * which can trigger memory reclaim.  Telling the VM we're in
220 	 * the middle of a filesystem operation will cause it to not
221 	 * touch file-backed pages, preventing a deadlock.  Most (all?)
222 	 * filesystems already specify __GFP_NOFS in their mapping's
223 	 * gfp_mask, but let's be explicit here.
224 	 */
225 	unsigned int nofs = memalloc_nofs_save();
226 
227 	filemap_invalidate_lock_shared(mapping);
228 	index = mapping_align_index(mapping, index);
229 
230 	/*
231 	 * As iterator `i` is aligned to min_nrpages, round_up the
232 	 * difference between nr_to_read and lookahead_size to mark the
233 	 * index that only has lookahead or "async_region" to set the
234 	 * readahead flag.
235 	 */
236 	if (lookahead_size <= nr_to_read) {
237 		unsigned long ra_folio_index;
238 
239 		ra_folio_index = round_up(readahead_index(ractl) +
240 					  nr_to_read - lookahead_size,
241 					  min_nrpages);
242 		mark = ra_folio_index - index;
243 	}
244 	nr_to_read += readahead_index(ractl) - index;
245 	ractl->_index = index;
246 
247 	/*
248 	 * Preallocate as many pages as we will need.
249 	 */
250 	while (i < nr_to_read) {
251 		struct folio *folio = xa_load(&mapping->i_pages, index + i);
252 		int ret;
253 
254 		if (folio && !xa_is_value(folio)) {
255 			/*
256 			 * Page already present?  Kick off the current batch
257 			 * of contiguous pages before continuing with the
258 			 * next batch.  This page may be the one we would
259 			 * have intended to mark as Readahead, but we don't
260 			 * have a stable reference to this page, and it's
261 			 * not worth getting one just for that.
262 			 */
263 			read_pages(ractl);
264 			ractl->_index += min_nrpages;
265 			i = ractl->_index + ractl->_nr_pages - index;
266 			continue;
267 		}
268 
269 		folio = filemap_alloc_folio(gfp_mask,
270 					    mapping_min_folio_order(mapping));
271 		if (!folio)
272 			break;
273 
274 		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
275 		if (ret < 0) {
276 			folio_put(folio);
277 			if (ret == -ENOMEM)
278 				break;
279 			read_pages(ractl);
280 			ractl->_index += min_nrpages;
281 			i = ractl->_index + ractl->_nr_pages - index;
282 			continue;
283 		}
284 		if (i == mark)
285 			folio_set_readahead(folio);
286 		ractl->_workingset |= folio_test_workingset(folio);
287 		ractl->_nr_pages += min_nrpages;
288 		i += min_nrpages;
289 	}
290 
291 	/*
292 	 * Now start the IO.  We ignore I/O errors - if the folio is not
293 	 * uptodate then the caller will launch read_folio again, and
294 	 * will then handle the error.
295 	 */
296 	read_pages(ractl);
297 	filemap_invalidate_unlock_shared(mapping);
298 	memalloc_nofs_restore(nofs);
299 }
300 EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
301 
/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations were to trigger VM
 * writeback; we really don't want to intermingle reads and writes like
 * that.
 */
static void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		do_page_cache_ra(ractl, this_chunk, 0);

		nr_to_read -= this_chunk;
	}
}
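
/*
 * Worked example (assuming 4KiB pages): this_chunk is
 * (2 * 1024 * 1024) / 4096 = 512 pages, so a forced 3MiB (768 page)
 * read is issued as one 512-page chunk followed by one 256-page chunk.
 */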

/*
 * Set the initial window size: round the requested size up to the next
 * power of 2, then scale it up - x4 for small sizes, x2 for medium -
 * capping at max for large ones.  For a 128k (32 page) max readahead:
 * 1-2 pages = 16k, 3-4 pages = 32k, 5-8 pages = 64k, > 8 pages = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
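
/*
 * Worked example with max = 32 pages (128k): a 3-page request rounds up
 * to 4 pages; 4 > max / 32 but 4 <= max / 4, so it is doubled to 8
 * pages (32k).  A 12-page request rounds up to 16, which exceeds
 * max / 4, so the full 32-page (128k) window is used.
 */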

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
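
/*
 * Worked example with max = 128 pages: an established 4-page window
 * ramps as 4 -> 16 (4x, since 4 < 128 / 16), then 16 -> 32, 32 -> 64,
 * 64 -> 128 (2x), after which it stays pinned at max.
 */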

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
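
/*
 * Illustrative trace of the pipelining above: with an established
 * window where async_size == size, the PG_readahead marker sits at
 * ->start.  A sequential reader touching that folio enters
 * page_cache_async_ra(), which advances ->start by ->size and submits
 * the next window while the current one is still being consumed, so
 * the stream never stalls on a missing page.
 */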

static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = filemap_alloc_folio(gfp, order);

	if (!folio)
		return -ENOMEM;
	mark = round_down(mark, 1UL << order);
	if (index == mark)
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}

	ractl->_nr_pages += 1UL << order;
	ractl->_workingset |= folio_test_workingset(folio);
	return 0;
}

void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned int new_order)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t index = readahead_index(ractl);
	unsigned int min_order = mapping_min_folio_order(mapping);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);
	unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));

	/*
	 * Fallback when size < min_nrpages as each folio should be
	 * at least min_nrpages anyway.
	 */
	if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size)
		goto fallback;

	limit = min(limit, index + ra->size - 1);

	if (new_order < mapping_max_folio_order(mapping))
		new_order += 2;

	new_order = min(mapping_max_folio_order(mapping), new_order);
	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
	new_order = max(new_order, min_order);

	/* See comment in page_cache_ra_unbounded() */
	nofs = memalloc_nofs_save();
	filemap_invalidate_lock_shared(mapping);
	/*
	 * If new_order is greater than min_order and index is already
	 * aligned to new_order, then this will be a no-op, as an index
	 * aligned to new_order is also aligned to min_order.
	 */
	ractl->_index = mapping_align_index(mapping, index);
	index = readahead_index(ractl);

	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);
		/* Don't allocate pages past EOF */
		while (order > min_order && index + (1UL << order) - 1 > limit)
			order--;
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation.
	 */
	if (!err)
		return;
fallback:
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}

static unsigned long ractl_max_pages(struct readahead_control *ractl,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	unsigned long max_pages = ractl->ra->ra_pages;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);
	return max_pages;
}
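
/*
 * Worked example: with ra->ra_pages = 32 and bdi->io_pages = 512, a
 * 256-page request exceeds the 32-page window, so max_pages becomes
 * min(256, 512) = 256; a 16-page request leaves max_pages at 32.
 */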

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	pgoff_t index = readahead_index(ractl);
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages, contig_count;
	pgoff_t prev_index, miss;

	/*
	 * If we have pre-content watches, we need to disable readahead to
	 * make sure that we don't find zero-filled pages in the cache that
	 * we never emitted events for. Filesystems supporting HSM must make
	 * sure not to call this function with ractl->file unset for files
	 * handled by HSM.
	 */
	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
		return;

	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range. The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	max_pages = ractl_max_pages(ractl, req_count);
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	/*
	 * A start of file, oversized read, or sequential cache miss:
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
		ra->start = index;
		ra->size = get_init_ra_size(req_count, max_pages);
		ra->async_size = ra->size > req_count ? ra->size - req_count :
							ra->size >> 1;
		goto readit;
	}

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	rcu_read_lock();
	miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
	rcu_read_unlock();
	contig_count = index - miss - 1;
	/*
	 * Standalone, small random read. Read as is, and do not pollute the
	 * readahead state.
	 */
	if (contig_count <= req_count) {
		do_page_cache_ra(ractl, req_count, 0);
		return;
	}
	/*
	 * File cached from the beginning:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (miss == ULONG_MAX)
		contig_count *= 2;
	ra->start = index;
	ra->size = min(contig_count + req_count, max_pages);
	ra->async_size = 1;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, 0);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	unsigned long max_pages;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t index = readahead_index(ractl);
	pgoff_t expected, start;
	unsigned int order = folio_order(folio);

	/* no readahead */
	if (!ra->ra_pages)
		return;

	/* See the comment in page_cache_sync_ra. */
	if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode)))
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (folio_test_writeback(folio))
		return;

	folio_clear_readahead(folio);

	if (blk_cgroup_congested())
		return;

	max_pages = ractl_max_pages(ractl, req_count);
	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			1UL << order);
	if (index == expected) {
		ra->start += ra->size;
		/*
		 * In the case of MADV_HUGEPAGE, the actual size might exceed
		 * the readahead window.
		 */
		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
		ra->async_size = ra->size;
		goto readit;
	}


	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	rcu_read_lock();
	start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
	rcu_read_unlock();

	if (!start || start - index > max_pages)
		return;

	ra->start = start;
	ra->size = start - index;	/* old async_size */
	ra->size += req_count;
	ra->size = get_next_ra_size(ra, max_pages);
	ra->async_size = ra->size;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, order);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ))
		return -EBADF;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops ||
	    (!S_ISREG(file_inode(fd_file(f))->i_mode) &&
	    !S_ISBLK(file_inode(fd_file(f))->i_mode)))
		return -EINVAL;

	return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
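
/*
 * Illustrative userspace usage of the syscall above (a sketch, not
 * kernel code); readahead(2) is declared in <fcntl.h> under _GNU_SOURCE
 * and returns 0 on success or -1 with errno set:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	// Hint that the first 2MiB will be needed soon.
 *	if (fd >= 0 && readahead(fd, 0, 2 * 1024 * 1024) != 0)
 *		perror("readahead");
 */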

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
{
	return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
}
#endif

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to
 * the specified size by inserting locked pages before and after the current
 * window.  This may involve the insertion of THPs, in which case the window
 * may get expanded even beyond what was requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for
 * a different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
	unsigned int min_order = mapping_min_folio_order(mapping);

	new_index = new_start / PAGE_SIZE;
	/*
	 * Readahead code should have aligned the ractl->_index to
	 * min_nrpages before calling readahead aops.
	 */
	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = filemap_alloc_folio(gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		ractl->_index = folio->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = filemap_alloc_folio(gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		if (ra) {
			ra->size += min_nrpages;
			ra->async_size += min_nrpages;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
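
/*
 * Illustrative usage sketch (not from this file): a filesystem whose
 * ->readahead() must read whole compressed blocks might widen the
 * window to block boundaries, assuming a hypothetical MYFS_BLOCK_SIZE:
 *
 *	loff_t start = round_down(readahead_pos(rac), MYFS_BLOCK_SIZE);
 *	loff_t end = round_up(readahead_pos(rac) + readahead_length(rac),
 *			      MYFS_BLOCK_SIZE);
 *
 *	readahead_expand(rac, start, end - start);
 *
 *	// The expansion may fall short of the request; re-check
 *	// readahead_pos()/readahead_length() before relying on it.
 */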