xref: /linux/mm/readahead.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

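/*
 * Annotation (not part of the original file): the usual caller is the VFS
 * open path, which zeroes the freshly allocated struct file and then does
 * roughly:
 *
 *	file_ra_state_init(&file->f_ra, file->f_mapping);
 *
 * so each open file starts with the bdi's default window (ra_pages) and
 * no previous-read history (prev_pos == -1).
 */
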
/*
 * See if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling (e.g. NFS marking pages that are cached locally on disk),
 *   so we need to give the filesystem a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

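/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->readpages() implementation can hand its page list straight to
 * read_cache_pages() together with a per-page filler callback.  The
 * names below are hypothetical:
 *
 *	static int example_filler(void *data, struct page *page)
 *	{
 *		struct file *file = data;
 *
 *		return example_read_one_page(file, page);
 *	}
 *
 *	static int example_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, example_filler, file);
 *	}
 *
 * read_cache_pages() adds each page to the page cache and LRU before
 * calling the filler, and invalidates any remaining pages if the filler
 * fails.
 */
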
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
			mapping->a_ops->readpage(filp, page);
		put_page(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages submitted in the final batch of I/O; pages
 * that were already present in the page cache are skipped and not counted,
 * so callers should treat the value as a hint only.
 */
unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	unsigned int nr_pages = 0;
	loff_t isize = i_size_read(inode);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = xa_load(&mapping->i_pages, page_offset);
		if (page && !xa_is_value(page)) {
			/*
			 * Page already present?  Kick off the current batch of
			 * contiguous pages before continuing with the next
			 * batch.
			 */
			if (nr_pages)
				read_pages(mapping, filp, &page_pool, nr_pages,
						gfp_mask);
			nr_pages = 0;
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (nr_pages)
		read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
	BUG_ON(!list_empty(&page_pool));
out:
	return nr_pages;
}

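/*
 * Worked example (annotation, not in the original source): with
 * nr_to_read == 32 and lookahead_size == 16, the loop above sets
 * PG_readahead on the page at index offset + 16.  When the reader later
 * touches that page, page_cache_async_readahead() submits the next window
 * while 16 pages are still unread - the pipelining described in the
 * on-demand readahead design comment below.
 */
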
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			       pgoff_t offset, unsigned long nr_to_read)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	struct file_ra_state *ra = &filp->f_ra;
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min(nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}

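/*
 * Annotation (not in the original source): this forced variant is used by
 * callers such as the FMODE_RANDOM path below and POSIX_FADV_WILLNEED; it
 * reads exactly the requested range (capped at the larger of the readahead
 * window and the device's optimal IO size) in 2MB chunks, bypassing the
 * heuristics in ondemand_readahead().
 */
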
/*
 * Set the initial readahead window size: round the request up to the next
 * power of two, then scale it up (x4 for small requests, x2 for medium
 * ones) and clamp it to the maximum window.  With the default 128k
 * (32 page) max readahead, a 1-8 page read gets a 16k-64k initial window
 * and anything larger starts out at the full 128k.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

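/*
 * Example values (annotation, not in the original source), assuming the
 * default 32 page (128k) maximum:
 *
 *	requested pages:	1    2    4    8    16   32
 *	initial window:		4    4    8    16   32   32
 */
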
/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

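/*
 * Example ramp-up (annotation, not in the original source), again with a
 * 32 page maximum: a 1 page window quadruples to 4, then doubles through
 * 8 and 16, and is clamped at 32 from then on.
 */
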
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator. The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

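/*
 * Illustrative trace (annotation, not in the original source): one-page
 * reads walking sequentially through a cold file, with a 32 page maximum
 * window, drive the state machine below roughly as follows:
 *
 *	read page 0  -> sync:  start=0,  size=4,  async_size=3,  mark page 1
 *	read page 1  -> async: start=4,  size=8,  async_size=8,  mark page 4
 *	read page 4  -> async: start=12, size=16, async_size=16, mark page 12
 *	read page 12 -> async: start=28, size=32, async_size=32, mark page 28
 *
 * after which the window stays at the 32 page maximum.
 */
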
/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

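/*
 * Example (annotation, not in the original source): a reader that has
 * already faulted in pages offset-16 ... offset-1 one by one (with the
 * page before them absent) leaves 16 contiguous history pages.  A 2 page
 * read at offset then sees size=16 > req_size=2, so readahead restarts
 * with ra->size = min(16 + 2, max) and async_size = 1 rather than being
 * treated as a random read.
 */
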
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	pgoff_t prev_offset;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(mapping, offset + 1, max_pages);
		rcu_read_unlock();

		if (!start || start - offset > max_pages)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	if (blk_cgroup_congested())
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);

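/*
 * Illustrative sketch (not part of the original file) of how a buffered
 * read path is expected to drive the two hooks above, modelled loosely on
 * generic_file_buffered_read() in mm/filemap.c:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	} else if (PageReadahead(page)) {
 *		page_cache_async_readahead(mapping, ra, filp,
 *					   page, index, last_index - index);
 *	}
 *
 * i.e. the sync variant runs on a cache miss and the async variant when
 * the PG_readahead marker page is reached.
 */
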
ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    !S_ISREG(file_inode(f.file)->i_mode))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
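
/*
 * Userspace view (annotation, not in the original source): the syscall
 * above backs readahead(2), e.g.
 *
 *	int fd = open("data.bin", O_RDONLY);	// hypothetical file
 *	readahead(fd, 0, 16 * 1024 * 1024);	// prefetch the first 16MB
 *
 * which, as implemented above, behaves like
 * posix_fadvise(fd, 0, 16 * 1024 * 1024, POSIX_FADV_WILLNEED) on a
 * regular file.
 */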