// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

/**
 * DOC: Readahead Overview
 *
 * Readahead is used to read content into the page cache before it is
 * explicitly requested by the application.  Readahead only ever
 * attempts to read folios that are not yet in the page cache.  If a
 * folio is present but not up-to-date, readahead will not try to read
 * it.  In that case a simple ->read_folio() will be requested.
 *
 * Readahead is triggered when an application read request (whether a
 * system call or a page fault) finds that the requested folio is not in
 * the page cache, or that it is in the page cache and has the
 * readahead flag set.  This flag indicates that the folio was read
 * as part of a previous readahead request and now that it has been
 * accessed, it is time for the next readahead.
 *
 * Each readahead request is partly a synchronous read and partly
 * asynchronous readahead.  This is reflected in the struct
 * file_ra_state, in which ->size is the total number of pages and
 * ->async_size is the number of pages in the async section.  The
 * readahead flag will be set on the first folio in this async section
 * to trigger a subsequent readahead.  Once a series of sequential reads
 * has been established, there should be no need for a synchronous
 * component and all readahead requests will be fully asynchronous.
 *
 * When either of the triggers causes a readahead, three numbers need
 * to be determined: the start of the region to read, the size of the
 * region, and the size of the async tail.
 *
 * The start of the region is simply the first page at or after the
 * accessed address that is not currently populated in the page
 * cache.  This is found with a simple search in the page cache.
 *
 * The size of the async tail is determined by subtracting the size that
 * was explicitly requested from the determined request size, unless
 * this would be less than zero, in which case zero is used.  NOTE THIS
 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
 * PAGE.  ALSO THIS CALCULATION IS NOT USED CONSISTENTLY.
 *
 * The size of the region is normally determined from the size of the
 * previous readahead which loaded the preceding pages.  This may be
 * discovered from the struct file_ra_state for simple sequential reads,
 * or from examining the state of the page cache when multiple
 * sequential reads are interleaved.  Specifically: where the readahead
 * was triggered by the readahead flag, the size of the previous
 * readahead is assumed to be the number of pages from the triggering
 * page to the start of the new readahead.  In these cases, the size of
 * the previous readahead is scaled, often doubled, for the new
 * readahead, though see get_next_ra_size() for details.
 *
 * If the size of the previous read cannot be determined, the number of
 * preceding pages in the page cache is used to estimate the size of
 * a previous read.  This estimate could easily be misled by random
 * reads being coincidentally adjacent, so it is ignored unless it is
 * larger than the current request, and it is not scaled up, unless it
 * is at the start of the file.
 *
 * In general readahead is accelerated at the start of the file, as
 * reads from there are often sequential.  There are other minor
 * adjustments to the readahead size in various special cases and these
 * are best discovered by reading the code.
 *
 * The above calculation, based on the previous readahead size,
 * determines the size of the readahead, to which any requested read
 * size may be added.
 *
 * Readahead requests are sent to the filesystem using the ->readahead()
 * address space operation, for which mpage_readahead() is a canonical
 * implementation.  ->readahead() should normally initiate reads on all
 * folios, but may fail to read any or all folios without causing an I/O
 * error.  The page cache reading code will issue a ->read_folio() request
 * for any folio which ->readahead() did not read, and only an error
 * from this will be final.
 *
 * ->readahead() will generally call readahead_folio() repeatedly to get
 * each folio from those prepared for readahead.  It may fail to read a
 * folio by:
 *
 * * not calling readahead_folio() sufficiently many times, effectively
 *   ignoring some folios, as might be appropriate if the path to
 *   storage is congested,
 *
 * * failing to actually submit a read request for a given folio,
 *   possibly due to insufficient resources, or
 *
 * * getting an error during subsequent processing of a request.
 *
 * In the last two cases, the folio should be unlocked by the filesystem
 * to indicate that the read attempt has failed.  In the first case the
 * folio will be unlocked by the VFS.
 *
 * Those folios not in the final ``async_size`` of the request should be
 * considered to be important and ->readahead() should not fail them due
 * to congestion or temporary resource unavailability, but should wait
 * for necessary resources (e.g.  memory or indexing information) to
 * become available.  Folios in the final ``async_size`` may be
 * considered less urgent and failure to read them is more acceptable.
 * In this case it is best to use filemap_remove_folio() to remove the
 * folios from the page cache as is automatically done for folios that
 * were not fetched with readahead_folio().  This will allow a
 * subsequent synchronous readahead request to try them again.  If they
 * are left in the page cache, then they will be read individually using
 * ->read_folio() which may be less efficient.
 */
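
/*
 * Illustrative sketch (not part of the kernel): a minimal ->readahead()
 * implementation following the rules above might look like the code
 * below.  submit_folio_read() stands in for whatever mechanism the
 * filesystem uses to start asynchronous I/O and is purely hypothetical:
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(rac)) != NULL) {
 *			if (submit_folio_read(rac->file, folio) < 0) {
 *				// Read could not be started: unlock the
 *				// folio so the VFS knows this attempt
 *				// failed and may retry via ->read_folio().
 *				folio_unlock(folio);
 *			}
 *		}
 *	}
 *
 * On success the I/O completion path, not ->readahead() itself, is
 * expected to mark the folio up-to-date and unlock it.
 */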

#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/psi.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

static void read_pages(struct readahead_control *rac)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct folio *folio;
	struct blk_plug plug;

	if (!readahead_count(rac))
		return;

	if (unlikely(rac->_workingset))
		psi_memstall_enter(&rac->_pflags);
	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining folios. */
		while ((folio = readahead_folio(rac)) != NULL) {
			folio_get(folio);
			filemap_remove_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
		}
	} else {
		while ((folio = readahead_folio(rac)) != NULL)
			aops->read_folio(rac->file, folio);
	}

	blk_finish_plug(&plug);
	if (unlikely(rac->_workingset))
		psi_memstall_leave(&rac->_pflags);
	rac->_workingset = false;

	BUG_ON(readahead_count(rac));
}

static struct folio *ractl_alloc_folio(struct readahead_control *ractl,
				       gfp_t gfp_mask, unsigned int order)
{
	struct folio *folio;

	folio = filemap_alloc_folio(gfp_mask, order);
	if (folio && ractl->dropbehind)
		__folio_set_dropbehind(folio);

	return folio;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long mark = ULONG_MAX, i = 0;
	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	index = mapping_align_index(mapping, index);

	/*
	 * Because the iterator `i` advances in units of min_nrpages, round
	 * the boundary between the explicitly requested region and the
	 * trailing lookahead ("async") region up to min_nrpages, so that
	 * the readahead flag is set on a suitably aligned index.
	 */
	if (lookahead_size <= nr_to_read) {
		unsigned long ra_folio_index;

		ra_folio_index = round_up(readahead_index(ractl) +
					  nr_to_read - lookahead_size,
					  min_nrpages);
		mark = ra_folio_index - index;
	}
	nr_to_read += readahead_index(ractl) - index;
	ractl->_index = index;

	/*
	 * Preallocate as many pages as we will need.
	 */
	while (i < nr_to_read) {
		struct folio *folio = xa_load(&mapping->i_pages, index + i);
		int ret;

		if (folio && !xa_is_value(folio)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}

		folio = ractl_alloc_folio(ractl, gfp_mask,
					mapping_min_folio_order(mapping));
		if (!folio)
			break;

		ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
		if (ret < 0) {
			folio_put(folio);
			if (ret == -ENOMEM)
				break;
			read_pages(ractl);
			ractl->_index += min_nrpages;
			i = ractl->_index + ractl->_nr_pages - index;
			continue;
		}
		if (i == mark)
			folio_set_readahead(folio);
		ractl->_workingset |= folio_test_workingset(folio);
		ractl->_nr_pages += min_nrpages;
		i += min_nrpages;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the folio is not
	 * uptodate then the caller will launch read_folio again, and
	 * will then handle the error.
	 */
	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
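
/*
 * Illustrative sketch (not from the kernel): a filesystem that keeps,
 * say, verity or other metadata beyond i_size could start readahead of
 * it as below.  DEFINE_READAHEAD() is the real helper from
 * linux/pagemap.h; the surrounding function and the index/count values
 * are invented for the example:
 *
 *	static void example_ra_past_eof(struct file *file,
 *					pgoff_t index, unsigned long nr)
 *	{
 *		DEFINE_READAHEAD(ractl, file, &file->f_ra,
 *				 file->f_mapping, index);
 *
 *		// No lookahead marker is wanted here, so pass 0.
 *		page_cache_ra_unbounded(&ractl, nr, 0);
 *	}
 */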

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
static void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size.
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		do_page_cache_ra(ractl, this_chunk, 0);

		nr_to_read -= this_chunk;
	}
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - by 4 for small requests and by 2 for medium ones -
 * capping the result at max.  For the default 128k (32 page) max ra this
 * gives: 1-2 pages = 16k, 3-4 pages = 32k, 5-8 pages = 64k, > 8 pages =
 * 128k initial window.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
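
/*
 * Worked example (illustrative): with the default max of 32 pages, a
 * first read of 4 pages rounds up to 4, falls in the middle bucket
 * (4 <= 32 / 4) and is doubled, so the initial window is 8 pages (32k).
 */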

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
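
/*
 * Worked example (illustrative): with max = 128 pages, a window that
 * starts at 4 pages ramps up as 4 -> 16 -> 32 -> 64 -> 128 on
 * successive calls (quadrupling while below max / 16, doubling up to
 * max / 2, then clamping at max).
 */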

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows it
 * down as it approaches max_readahead.
 */

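/*
 * Worked example (illustrative, assuming ra_pages = 32 and 4-page reads):
 * the first read at index 0 takes the start-of-file path in
 * page_cache_sync_ra(), giving start = 0, size = get_init_ra_size(4, 32)
 * = 8 and async_size = 8 - 4 = 4, with PG_readahead set on the folio at
 * index 4.  When the application later touches index 4, the readahead
 * flag triggers page_cache_async_ra(), which advances the window to
 * start = 8, size = get_next_ra_size() = 16, async_size = 16 and marks
 * index 8, and so on until the window reaches ra_pages.
 */
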
static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
		pgoff_t mark, unsigned int order, gfp_t gfp)
{
	int err;
	struct folio *folio = ractl_alloc_folio(ractl, gfp, order);

	if (!folio)
		return -ENOMEM;
	mark = round_down(mark, 1UL << order);
	if (index == mark)
		folio_set_readahead(folio);
	err = filemap_add_folio(ractl->mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}

	ractl->_nr_pages += 1UL << order;
	ractl->_workingset |= folio_test_workingset(folio);
	return 0;
}

void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned int new_order)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t start = readahead_index(ractl);
	pgoff_t index = start;
	unsigned int min_order = mapping_min_folio_order(mapping);
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
	int err = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);
	unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));

	/*
	 * Fall back when size < min_nrpages, as each folio should be
	 * at least min_nrpages anyway.
	 */
	if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size)
		goto fallback;

	limit = min(limit, index + ra->size - 1);

	if (new_order < mapping_max_folio_order(mapping))
		new_order += 2;

	new_order = min(mapping_max_folio_order(mapping), new_order);
	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
	new_order = max(new_order, min_order);

	/* See comment in page_cache_ra_unbounded() */
	nofs = memalloc_nofs_save();
	filemap_invalidate_lock_shared(mapping);
	/*
	 * If new_order is greater than min_order and index is already
	 * aligned to new_order, then this is a no-op, as an index aligned
	 * to new_order is also aligned to min_order.
	 */
	ractl->_index = mapping_align_index(mapping, index);
	index = readahead_index(ractl);

	while (index <= limit) {
		unsigned int order = new_order;

		/* Align with smaller pages if needed */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);
		/* Don't allocate pages past EOF */
		while (order > min_order && index + (1UL << order) - 1 > limit)
			order--;
		err = ra_alloc_folio(ractl, index, mark, order, gfp);
		if (err)
			break;
		index += 1UL << order;
	}

	read_pages(ractl);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);

	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation below.
	 */
	if (!err)
		return;
fallback:
	/*
	 * ->readahead() may have updated the readahead window size so we
	 * have to check there's still something to read.
	 */
	if (ra->size > index - start)
		do_page_cache_ra(ractl, ra->size - (index - start),
				 ra->async_size);
}

static unsigned long ractl_max_pages(struct readahead_control *ractl,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	unsigned long max_pages = ractl->ra->ra_pages;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size.
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);
	return max_pages;
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	pgoff_t index = readahead_index(ractl);
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages, contig_count;
	pgoff_t prev_index, miss;

	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range. The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	max_pages = ractl_max_pages(ractl, req_count);
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	/*
	 * A start of file, oversized read, or sequential cache miss:
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	if (!index || req_count > max_pages || index - prev_index <= 1UL) {
		ra->start = index;
		ra->size = get_init_ra_size(req_count, max_pages);
		ra->async_size = ra->size > req_count ? ra->size - req_count :
							ra->size >> 1;
		goto readit;
	}

	/*
	 * Query the page cache and look for the traces (cached history
	 * pages) that a sequential stream would leave behind.
	 */
	rcu_read_lock();
	miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
	rcu_read_unlock();
	contig_count = index - miss - 1;
	/*
	 * Standalone, small random read. Read as is, and do not pollute the
	 * readahead state.
	 */
	if (contig_count <= req_count) {
		do_page_cache_ra(ractl, req_count, 0);
		return;
	}
	/*
	 * File cached from the beginning:
	 * it is a strong indication of a long-running stream (or a
	 * whole-file read).
	 */
	if (miss == ULONG_MAX)
		contig_count *= 2;
	ra->start = index;
	ra->size = min(contig_count + req_count, max_pages);
	ra->async_size = 1;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, 0);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
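
/*
 * Illustrative sketch (not from the kernel): a read path that misses the
 * page cache would normally reach this code through the
 * page_cache_sync_readahead() wrapper from linux/pagemap.h rather than
 * calling page_cache_sync_ra() directly; the function below and its
 * arguments are invented for the example:
 *
 *	static void example_miss(struct file *file, pgoff_t index,
 *				 unsigned long nr_to_read)
 *	{
 *		// Builds a readahead_control internally and ends up in
 *		// page_cache_sync_ra().
 *		page_cache_sync_readahead(file->f_mapping, &file->f_ra,
 *					  file, index, nr_to_read);
 *	}
 */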

void page_cache_async_ra(struct readahead_control *ractl,
		struct folio *folio, unsigned long req_count)
{
	unsigned long max_pages;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t index = readahead_index(ractl);
	pgoff_t expected, start;
	unsigned int order = folio_order(folio);

	/* no readahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (folio_test_writeback(folio))
		return;

	folio_clear_readahead(folio);

	if (blk_cgroup_congested())
		return;

	max_pages = ractl_max_pages(ractl, req_count);
	/*
	 * It's the expected callback index, so assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			1UL << order);
	if (index == expected) {
		ra->start += ra->size;
		/*
		 * In the case of MADV_HUGEPAGE, the actual size might exceed
		 * the readahead window.
		 */
		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked folio without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	rcu_read_lock();
	start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
	rcu_read_unlock();

	if (!start || start - index > max_pages)
		return;

	ra->start = start;
	ra->size = start - index;	/* old async_size */
	ra->size += req_count;
	ra->size = get_next_ra_size(ra, max_pages);
	ra->async_size = ra->size;
readit:
	ractl->_index = ra->start;
	page_cache_ra_order(ractl, ra, order);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ))
		return -EBADF;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops ||
	    (!S_ISREG(file_inode(fd_file(f))->i_mode) &&
	    !S_ISBLK(file_inode(fd_file(f))->i_mode)))
		return -EINVAL;

	return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD)
COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)
{
	return ksys_readahead(fd, compat_arg_u64_glue(offset), count);
}
#endif
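
/*
 * Illustrative sketch (userspace, not part of this file): because
 * ksys_readahead() is implemented via vfs_fadvise(POSIX_FADV_WILLNEED),
 * the two calls below request the same kind of readahead on an open fd:
 *
 *	// readahead(2)
 *	readahead(fd, 0, 1024 * 1024);
 *
 *	// posix_fadvise(2) with WILLNEED
 *	posix_fadvise(fd, 0, 1024 * 1024, POSIX_FADV_WILLNEED);
 */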

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards to the specified start
 * and size by inserting locked pages before and after the current window.
 * This may involve the insertion of THPs, in which case the window may get
 * expanded even beyond what was requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in
 * the pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for
 * a different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
	unsigned int min_order = mapping_min_folio_order(mapping);

	new_index = new_start / PAGE_SIZE;
	/*
	 * Readahead code should have aligned the ractl->_index to
	 * min_nrpages before calling readahead aops.
	 */
	VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		ractl->_index = folio->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct folio *folio = xa_load(&mapping->i_pages, index);

		if (folio && !xa_is_value(folio))
			return; /* Folio apparently present */

		folio = ractl_alloc_folio(ractl, gfp_mask, min_order);
		if (!folio)
			return;

		index = mapping_align_index(mapping, index);
		if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
			folio_put(folio);
			return;
		}
		if (unlikely(folio_test_workingset(folio)) &&
				!ractl->_workingset) {
			ractl->_workingset = true;
			psi_memstall_enter(&ractl->_pflags);
		}
		ractl->_nr_pages += min_nrpages;
		if (ra) {
			ra->size += min_nrpages;
			ra->async_size += min_nrpages;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
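
/*
 * Illustrative sketch (not from the kernel): a filesystem whose on-disk
 * blocks (for example compressed extents) are larger than a page might
 * round the request out to block boundaries at the start of its
 * ->readahead() before consuming folios.  "blocksize" (assumed to be a
 * power of two) and the function name are invented for the example:
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		loff_t start = round_down(readahead_pos(rac), blocksize);
 *		size_t len = round_up(readahead_pos(rac) +
 *				      readahead_length(rac), blocksize) - start;
 *
 *		// May expand by less than asked: re-check readahead_pos()
 *		// and readahead_length() afterwards rather than assuming.
 *		readahead_expand(rac, start, len);
 *		...
 *	}
 */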