// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application.  Readahead only ever
 * attempts to read folios that are not yet in the page cache.  If a
 * folio is present but not up-to-date, readahead will not try to read
 * it. In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
 * the page cache, or that it is in the page cache and has the
 * readahead flag set.  This flag indicates that the folio was read
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly synchronous read, and partly async
 * readahead.  This is reflected in the struct file_ra_state which
 * contains ->size being the total number of pages, and ->async_size
 * which is the number of pages in the async section.  The readahead
 * flag will be set on the first folio in this async section to trigger
 * a subsequent readahead.  Once a series of sequential reads has been
 * established, there should be no need for a synchronous component and
 * all readahead requests will be fully asynchronous.
 *
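 * For example, with ->size == 32 and ->async_size == 16, the last 16
 * folios of the window form the async section.  The readahead flag is
 * set on the first folio of that section, so that when the application
 * reaches it the next readahead is started before the reader can stall.
 *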
 * When either of the triggers causes a readahead, three numbers need
 * to be determined: the start of the region to read, the size of the
 * region, and the size of the async tail.
 *
 * The start of the region is simply the first page address at or after
 * the accessed address that is not currently populated in the page
 * cache.  This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero - then zero is used.  NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages.  This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved.  Specifically: where the readahead
 * was triggered by the readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead.  In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read.  This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of the file.
 *
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential.  There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation, based on the previous readahead size,
 * determines the size of the readahead, to which any requested read
 * size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation.  ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
 * error.  The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_folio() repeatedly to get
 * each folio from those prepared for readahead.  It may fail to read a
 * folio by:
 *
 * * not calling readahead_folio() sufficiently many times, effectively
 *   ignoring some folios, as might be appropriate if the path to
 *   storage is congested.
 *
 * * failing to actually submit a read request for a given folio,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the folio should be unlocked by the filesystem
 * to indicate that the read attempt has failed.  In the first case the
 * folio will be unlocked by the VFS.
 *
 * Those folios not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g.  memory or indexing information) to
 * become available.  Folios in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * In this case it is best to use filemap_remove_folio() to remove the
 * folios from the page cache as is automatically done for folios that
 * were not fetched with readahead_folio().  This will allow a
 * subsequent synchronous readahead request to try them again.  If they
 * are left in the page cache, then they will be read individually using
 * ->read_folio() which may be less efficient.
 */

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/readahead.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

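/*
 * Submit reads for all folios in this readahead batch.  Prefer the
 * ->readahead() address_space operation; any folios it leaves unconsumed
 * are removed from the page cache and unlocked so that a later read can
 * retry them.  If the mapping has no ->readahead(), fall back to issuing
 * ->read_folio() for each folio individually.
 */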
static void read_pages(struct readahead_control *rac)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct folio *folio;
	struct blk_plug plug;

	if (!readahead_count(rac))
		return;

	if (unlikely(rac->_workingset))
		psi_memstall_enter(&rac->_pflags);
	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining folios. */
		while ((folio = readahead_folio(rac)) != NULL) {
			folio_get(folio);
			filemap_remove_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
		}
	} else {
		while ((folio = readahead_folio(rac)) != NULL)
			aops->read_folio(rac->file, folio);
	}

	blk_finish_plug(&plug);
	if (unlikely(rac->_workingset))
		psi_memstall_leave(&rac->_pflags);
	rac->_workingset = false;

	BUG_ON(readahead_count(rac));
}

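/*
 * Allocate a folio for this readahead request, propagating the dropbehind
 * hint from the readahead control so that the folio is dropped from the
 * page cache soon after it has been consumed.
 */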
static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
				       gfp_t gfp_mask, unsigned int order)
{
	struct folio *folio;

	folio = filemap_alloc_folio(gfp_mask, order);
	if (folio && ractl->dropbehind)
		__folio_set_dropbehind(folio);

	return folio;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long mark = ULONG_MAX, i = 0;
	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read,
				      lookahead_size);
	filemap_invalidate_lock_shared(mapping);
	index = mapping_align_index(mapping, index);

	/*
	 * The iterator `i` advances in steps of min_nrpages, so round the
	 * start of the lookahead ("async") region up to a min_nrpages
	 * boundary before computing the index at which to set the
	 * readahead flag.
	 */
	if (lookahead_size <= nr_to_read) {
		unsigned long ra_folio_index;

		ra_folio_index = round_up(readahead_index(ractl) +
					  nr_to_read - lookahead_size,
					  min_nrpages);
		mark = ra_folio_index - index;
	}
	nr_to_read += readahead_index(ractl) - index;
	ractl->_index = index;

	/*
	 * Preallocate as many pages as we will need.
	 */
	while (i < nr_to_read) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);
		int ret;

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}

		folio = ractl_alloc_folio(ractl, gfp_mask,
					mapping_min_folio_order(mapping));
		if (!folio)
			break;

		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			if (ret == -ENOMEM)
				break;
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}
		if (i == mark)
			folio_set_readahead(folio);
		ractl->_workingset |= folio_test_workingset(folio);
		ractl->_nr_pages += min_nrpages;
		i += min_nrpages;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the folio is not
	 * uptodate then the caller will launch read_folio again, and
	 * will then handle the error.
	 */
	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
static void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		do_page_cache_ra(ractl, this_chunk, 0);

		nr_to_read -= this_chunk;
	}
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then multiply by 4 for small sizes (<= max/32), by 2 for medium sizes
 * (<= max/4), and cap at max otherwise.
 * For a 128k (32 page) max ra:
 * 1-2 page = 16k, 3-4 page = 32k, 5-8 page = 64k, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
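 *  E.g. with a 128 page maximum window, successive calls ramp the size
 *  4 -> 16 -> 32 -> 64 -> 128.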
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

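/*
 * Allocate a folio of the given order and add it to the page cache at
 * @index, setting the readahead flag on it if it covers @mark.  Returns
 * 0 on success or a negative errno if allocation or page-cache insertion
 * fails.
 */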
static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = ractl_alloc_folio(ractl, gfp, order);

	if (!folio)
		return -ENOMEM;
	mark = round_down(mark, 1UL << order);
	if (index == mark)
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}

	ractl->_nr_pages += 1UL << order;
	ractl->_workingset |= folio_test_workingset(folio);
	return 0;
}

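/*
 * Fill the readahead window described by @ra with folios and submit the
 * reads, using as large a folio order as the mapping, the window size and
 * EOF permit.  If a folio cannot be allocated or inserted part way through,
 * fall back to do_page_cache_ra() for the remainder of the window.
 */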
void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t start = readahead_index(ractl);
	pgoff_t index = start;
	unsigned int min_order = mapping_min_folio_order(mapping);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);
	unsigned int new_order = ra->order;

	trace_page_cache_ra_order(mapping->host, start, ra);
	if (!mapping_large_folio_support(mapping)) {
		ra->order = 0;
		goto fallback;
	}

	limit = min(limit, index + ra->size - 1);

	new_order = min(mapping_max_folio_order(mapping), new_order);
	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
	new_order = max(new_order, min_order);

	ra->order = new_order;

	/* See comment in page_cache_ra_unbounded() */
	nofs = memalloc_nofs_save();
	filemap_invalidate_lock_shared(mapping);
	/*
	 * If new_order is greater than min_order and index is already
	 * aligned to new_order, then this will be a no-op, as an index
	 * aligned to new_order is also aligned to min_order.
	 */
	ractl->_index = mapping_align_index(mapping, index);
	index = readahead_index(ractl);

	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);
		/* Don't allocate pages past EOF */
		while (order > min_order && index + (1UL << order) - 1 > limit)
			order--;
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation below.
	 */
	if (!err)
		return;
fallback:
	/*
	 * ->readahead() may have updated readahead window size so we have to
	 * check there's still something to read.
	 */
	if (ra->size > index - start)
		do_page_cache_ra(ractl, ra->size - (index - start),
				 ra->async_size);
}

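/*
 * Work out the maximum number of pages for this request: normally the
 * readahead window size, but an oversized request may grow up to the
 * optimal hardware I/O size (bdi->io_pages).
 */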
static unsigned long ractl_max_pages(struct readahead_control *ractl,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	unsigned long max_pages = ractl->ra->ra_pages;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);
	return max_pages;
}

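/*
 * Synchronous readahead: the folio at the requested index was missing from
 * the page cache.  Choose a new readahead window from the request size and
 * any history in the file_ra_state or the page cache, then read it in.
 */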
void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	pgoff_t index = readahead_index(ractl);
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages, contig_count;
	pgoff_t prev_index, miss;

	trace_page_cache_sync_ra(ractl->mapping->host, index, ra, req_count);
	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range. The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	max_pages = ractl_max_pages(ractl, req_count);
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	/*
	 * A start of file, oversized read, or sequential cache miss:
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
		ra->start = index;
		ra->size = get_init_ra_size(req_count, max_pages);
		ra->async_size = ra->size > req_count ? ra->size - req_count :
							ra->size >> 1;
		goto readit;
	}

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	rcu_read_lock();
	miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
	rcu_read_unlock();
	contig_count = index - miss - 1;
	/*
	 * Standalone, small random read. Read as is, and do not pollute the
	 * readahead state.
	 */
	if (contig_count <= req_count) {
		do_page_cache_ra(ractl, req_count, 0);
		return;
	}
	/*
	 * File cached from the beginning:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (miss == ULONG_MAX)
		contig_count *= 2;
	ra->start = index;
	ra->size = min(contig_count + req_count, max_pages);
	ra->async_size = 1;
readit:
	ra->order = 0;
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

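/*
 * Asynchronous readahead: the application hit a folio marked with the
 * readahead flag.  Push the window forward and ramp up its size so the
 * I/O is in flight before the reader stalls on a missing folio.
 */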
void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	unsigned long max_pages;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t index = readahead_index(ractl);
	pgoff_t expected, start, end, aligned_end, align;

	/* no readahead */
	if (!ra->ra_pages)
		return;

	/*
	 * The same page flag bit is used for PG_readahead and PG_reclaim, so
	 * a folio under writeback cannot be trusted as a readahead marker.
	 */
	if (folio_test_writeback(folio))
		return;

	trace_page_cache_async_ra(ractl->mapping->host, index, ra, req_count);
	folio_clear_readahead(folio);

	if (blk_cgroup_congested())
		return;

	max_pages = ractl_max_pages(ractl, req_count);
	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			folio_nr_pages(folio));
	if (index == expected) {
		ra->start += ra->size;
		/*
		 * In the case of MADV_HUGEPAGE, the actual size might exceed
		 * the readahead window.
		 */
		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
		goto readit;
	}

	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	rcu_read_lock();
	start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
	rcu_read_unlock();

	if (!start || start - index > max_pages)
		return;

	ra->start = start;
	ra->size = start - index;	/* old async_size */
	ra->size += req_count;
	ra->size = get_next_ra_size(ra, max_pages);
readit:
	ra->order += 2;
	align = 1UL << min(ra->order, ffs(max_pages) - 1);
	end = ra->start + ra->size;
	aligned_end = round_down(end, align);
	if (aligned_end > ra->start)
		ra->size -= end - aligned_end;
	ra->async_size = ra->size;
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

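/*
 * readahead(2): populate the page cache with data from the given byte range
 * of an open file.  The descriptor must be readable and refer to a mapping
 * that supports readahead (a regular file or block device, not an anonymous
 * inode); the actual work is done via vfs_fadvise(POSIX_FADV_WILLNEED).
 */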
ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	struct file *file;
	const struct inode *inode;

	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	file = fd_file(f);
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	if (!file->f_mapping)
		return -EINVAL;
	if (!file->f_mapping->a_ops)
		return -EINVAL;

	inode = file_inode(file);
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		return -EINVAL;
	if (IS_ANON_FILE(inode))
		return -EINVAL;

	return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
{
	return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
}
#endif

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
	unsigned int min_order = mapping_min_folio_order(mapping);

	new_index = new_start / PAGE_SIZE;
	/*
	 * Readahead code should have aligned the ractl->_index to
	 * min_nrpages before calling readahead aops.
	 */
	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		ractl->_index = folio->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		if (ra) {
			ra->size += min_nrpages;
			ra->async_size += min_nrpages;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
839