xref: /linux/mm/readahead.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * mm/readahead.c - address_space-level file readahead.
3  *
4  * Copyright (C) 2002, Linus Torvalds
5  *
6  * 09Apr2002	akpm@zip.com.au
7  *		Initial version.
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/fs.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/blkdev.h>
15 #include <linux/backing-dev.h>
16 #include <linux/pagevec.h>
17 
18 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
19 {
20 }
21 EXPORT_SYMBOL(default_unplug_io_fn);
22 
23 struct backing_dev_info default_backing_dev_info = {
24 	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
25 	.state		= 0,
26 	.capabilities	= BDI_CAP_MAP_COPY,
27 	.unplug_io_fn	= default_unplug_io_fn,
28 };
29 EXPORT_SYMBOL_GPL(default_backing_dev_info);
30 
31 /*
32  * Initialise a struct file's readahead state.  Assumes that the caller has
33  * memset *ra to zero.
34  */
35 void
36 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
37 {
38 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
39 	ra->prev_page = -1;
40 }
41 
42 /*
43  * Return max readahead size for this inode in number-of-pages.
44  */
45 static inline unsigned long get_max_readahead(struct file_ra_state *ra)
46 {
47 	return ra->ra_pages;
48 }
49 
50 static inline unsigned long get_min_readahead(struct file_ra_state *ra)
51 {
52 	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
53 }
54 
55 static inline void reset_ahead_window(struct file_ra_state *ra)
56 {
57 	/*
58 	 * ... but preserve ahead_start + ahead_size value,
59 	 * see 'recheck:' label in page_cache_readahead().
60 	 * Note: We never use ->ahead_size as rvalue without
61 	 * checking ->ahead_start != 0 first.
62 	 */
63 	ra->ahead_size += ra->ahead_start;
64 	ra->ahead_start = 0;
65 }
66 
67 static inline void ra_off(struct file_ra_state *ra)
68 {
69 	ra->start = 0;
70 	ra->flags = 0;
71 	ra->size = 0;
72 	reset_ahead_window(ra);
73 	return;
74 }
75 
76 /*
77  * Set the initial window size: round the request up to the next power of
78  * two, then multiply by 4 for small requests (<= max/32), by 2 for medium
79  * requests (<= max/4), and clamp to max for larger ones.  (A worked example
80  * follows the function below.)
81  */
82 static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
83 {
84 	unsigned long newsize = roundup_pow_of_two(size);
85 
86 	if (newsize <= max / 32)
87 		newsize = newsize * 4;
88 	else if (newsize <= max / 4)
89 		newsize = newsize * 2;
90 	else
91 		newsize = max;
92 	return newsize;
93 }
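
/*
 * Worked example (a sketch, assuming 4 KiB pages and the default 128k,
 * i.e. 32 page, maximum):
 *
 *	requested pages		initial window
 *	1-2			 4 pages ( 16k)
 *	3-4			 8 pages ( 32k)
 *	5-8			16 pages ( 64k)
 *	> 8			32 pages (128k)
 */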
94 
95 /*
96  * Set the new window size.  This is called only when I/O is to be submitted,
97  * not for each call to readahead.  If a cache miss occurred, reduce the next
98  * I/O size, else increase it depending on how close to max we are.
99  */
100 static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
101 {
102 	unsigned long max = get_max_readahead(ra);
103 	unsigned long min = get_min_readahead(ra);
104 	unsigned long cur = ra->size;
105 	unsigned long newsize;
106 
107 	if (ra->flags & RA_FLAG_MISS) {
108 		ra->flags &= ~RA_FLAG_MISS;
109 		newsize = max((cur - 2), min);
110 	} else if (cur < max / 16) {
111 		newsize = 4 * cur;
112 	} else {
113 		newsize = 2 * cur;
114 	}
115 	return min(newsize, max);
116 }
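
/*
 * Worked example (a sketch, assuming a 32 page max and a 4 page min):
 * a current window of 8 pages grows to 16 on the next submission, then
 * to 32, where it stays clamped at max.  After a cache miss
 * (RA_FLAG_MISS) a 16 page window shrinks to max(16 - 2, 4) = 14 pages.
 */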
117 
118 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
119 
120 /**
121  * read_cache_pages - populate an address space with some pages, and
122  * 			start reads against them.
123  * @mapping: the address_space
124  * @pages: The address of a list_head which contains the target pages.  These
125  *   pages have their ->index populated and are otherwise uninitialised.
126  * @filler: callback routine for filling a single page.
127  * @data: private data for the callback routine.
128  *
129  * Hides the details of the LRU cache etc from the filesystems.
130  */
131 int read_cache_pages(struct address_space *mapping, struct list_head *pages,
132 			int (*filler)(void *, struct page *), void *data)
133 {
134 	struct page *page;
135 	struct pagevec lru_pvec;
136 	int ret = 0;
137 
138 	pagevec_init(&lru_pvec, 0);
139 
140 	while (!list_empty(pages)) {
141 		page = list_to_page(pages);
142 		list_del(&page->lru);
143 		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
144 			page_cache_release(page);
145 			continue;
146 		}
147 		ret = filler(data, page);
148 		if (!pagevec_add(&lru_pvec, page))
149 			__pagevec_lru_add(&lru_pvec);
150 		if (ret) {
151 			while (!list_empty(pages)) {
152 				struct page *victim;
153 
154 				victim = list_to_page(pages);
155 				list_del(&victim->lru);
156 				page_cache_release(victim);
157 			}
158 			break;
159 		}
160 	}
161 	pagevec_lru_add(&lru_pvec);
162 	return ret;
163 }
164 
165 EXPORT_SYMBOL(read_cache_pages);
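
/*
 * A minimal usage sketch (not part of this file; the foo_* names are
 * hypothetical and the usual pagemap/highmem includes are assumed): an
 * ->readpages() implementation can hand its page list straight to
 * read_cache_pages(), supplying a filler that plays the role of
 * ->readpage() for a single page.
 */
static int foo_filler(void *data, struct page *page)
{
	clear_highpage(page);		/* pretend the data is all zeroes */
	SetPageUptodate(page);
	unlock_page(page);		/* page was locked by add_to_page_cache() */
	return 0;
}

static int foo_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	/*
	 * read_cache_pages() inserts each page into the page cache,
	 * skips pages that are already present, runs the filler on the
	 * rest and takes care of LRU insertion.
	 */
	return read_cache_pages(mapping, pages, foo_filler, NULL /* data */);
}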
166 
167 static int read_pages(struct address_space *mapping, struct file *filp,
168 		struct list_head *pages, unsigned nr_pages)
169 {
170 	unsigned page_idx;
171 	struct pagevec lru_pvec;
172 	int ret;
173 
174 	if (mapping->a_ops->readpages) {
175 		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
176 		goto out;
177 	}
178 
179 	pagevec_init(&lru_pvec, 0);
180 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
181 		struct page *page = list_to_page(pages);
182 		list_del(&page->lru);
183 		if (!add_to_page_cache(page, mapping,
184 					page->index, GFP_KERNEL)) {
185 			ret = mapping->a_ops->readpage(filp, page);
186 			if (ret != AOP_TRUNCATED_PAGE) {
187 				if (!pagevec_add(&lru_pvec, page))
188 					__pagevec_lru_add(&lru_pvec);
189 				continue;
190 			} /* else fall through to release */
191 		}
192 		page_cache_release(page);
193 	}
194 	pagevec_lru_add(&lru_pvec);
195 	ret = 0;
196 out:
197 	return ret;
198 }
199 
200 /*
201  * Readahead design.
202  *
203  * The fields in struct file_ra_state represent the most-recently-executed
204  * readahead attempt:
205  *
206  * start:	Page index at which we started the readahead
207  * size:	Number of pages in that read
208  *              Together, these form the "current window".
210  * prev_page:   The page which the readahead algorithm most-recently inspected.
211  *              It is mainly used to detect sequential file reading.
212  *              If page_cache_readahead sees that it is again being called for
213  *              a page which it just looked at, it can return immediately without
214  *              making any state changes.
215  * ahead_start,
216  * ahead_size:  Together, these form the "ahead window".
217  * ra_pages:	The externally controlled max readahead for this fd.
218  *
219  * When readahead is in the off state (size == 0), readahead is disabled.
220  * In this state, prev_page is used to detect the resumption of sequential I/O.
221  *
222  * The readahead code manages two windows - the "current" and the "ahead"
223  * windows.  The intent is that while the application is walking the pages
224  * in the current window, I/O is underway on the ahead window.  When the
225  * current window is fully traversed, it is replaced by the ahead window
226  * and the ahead window is invalidated.  When this copying happens, the
227  * new current window's pages are probably still locked.  So
228  * we submit a new batch of I/O immediately, creating a new ahead window.
229  *
230  * So:
231  *
232  *   ----|----------------|----------------|-----
233  *       ^start           ^start+size
234  *                        ^ahead_start     ^ahead_start+ahead_size
235  *
236  *         ^ When this page is read, we submit I/O for the
237  *           ahead window.
238  *
239  * A `readahead hit' occurs when a read request is made against a page which is
240  * the next sequential page. Ahead window calculations are done only when it
241  * is time to submit a new IO.  The code ramps up the size aggressively at first,
242  * but slows down as it approaches max_readahead.
243  *
244  * Any seek/random IO will result in readahead being turned off.  It will resume
245  * at the first sequential access.
246  *
247  * There is a special-case: if the first page which the application tries to
248  * read happens to be the first page of the file, it is assumed that a linear
249  * read is about to happen and the window is immediately set to the initial size
250  * based on I/O request size and the max_readahead.
251  *
252  * This function is to be called for every read request, rather than when
253  * it is time to perform readahead.  It is called only once for the entire I/O
254  * regardless of size unless readahead is unable to start enough I/O to satisfy
255  * the request (I/O request > max_readahead).
256  */
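
/*
 * A worked trace of the above (a sketch, assuming 4 KiB pages, the default
 * 32 page max, and an application reading the file sequentially one page
 * at a time from the start):
 *
 *   read page 0:   first sequential access with no current window; a
 *                  4 page current window (pages 0-3) is read in.
 *   read page 1:   no ahead window yet, so an 8 page ahead window
 *                  (pages 4-11) is submitted in the background.
 *   read page 4:   we crossed into the ahead window; it becomes the
 *                  current window and a new 16 page ahead window
 *                  (pages 12-27) is submitted.
 *   read page 12:  the windows shift again; the next ahead window is 32
 *                  pages (28-59), the maximum, and it stays there while
 *                  the reads remain sequential.
 */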
257 
258 /*
259  * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
260  * the pages first, then submits them all for I/O. This avoids the very bad
261  * behaviour which would occur if page allocations are causing VM writeback.
262  * We really don't want to intermingle reads and writes like that.
263  *
264  * Returns the number of pages requested, or the maximum amount of I/O allowed.
265  *
266  * do_page_cache_readahead() returns -1 if it encountered request queue
267  * congestion.
268  */
269 static int
270 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
271 			pgoff_t offset, unsigned long nr_to_read)
272 {
273 	struct inode *inode = mapping->host;
274 	struct page *page;
275 	unsigned long end_index;	/* The last page we want to read */
276 	LIST_HEAD(page_pool);
277 	int page_idx;
278 	int ret = 0;
279 	loff_t isize = i_size_read(inode);
280 
281 	if (isize == 0)
282 		goto out;
283 
284  	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
285 
286 	/*
287 	 * Preallocate as many pages as we will need.
288 	 */
289 	read_lock_irq(&mapping->tree_lock);
290 	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
291 		pgoff_t page_offset = offset + page_idx;
292 
293 		if (page_offset > end_index)
294 			break;
295 
296 		page = radix_tree_lookup(&mapping->page_tree, page_offset);
297 		if (page)
298 			continue;
299 
300 		read_unlock_irq(&mapping->tree_lock);
301 		page = page_cache_alloc_cold(mapping);
302 		read_lock_irq(&mapping->tree_lock);
303 		if (!page)
304 			break;
305 		page->index = page_offset;
306 		list_add(&page->lru, &page_pool);
307 		ret++;
308 	}
309 	read_unlock_irq(&mapping->tree_lock);
310 
311 	/*
312 	 * Now start the IO.  We ignore I/O errors - if the page is not
313 	 * uptodate then the caller will launch readpage again, and
314 	 * will then handle the error.
315 	 */
316 	if (ret)
317 		read_pages(mapping, filp, &page_pool, ret);
318 	BUG_ON(!list_empty(&page_pool));
319 out:
320 	return ret;
321 }
322 
323 /*
324  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
325  * memory at once.
326  */
327 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
328 		pgoff_t offset, unsigned long nr_to_read)
329 {
330 	int ret = 0;
331 
332 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
333 		return -EINVAL;
334 
335 	while (nr_to_read) {
336 		int err;
337 
338 		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
339 
340 		if (this_chunk > nr_to_read)
341 			this_chunk = nr_to_read;
342 		err = __do_page_cache_readahead(mapping, filp,
343 						offset, this_chunk);
344 		if (err < 0) {
345 			ret = err;
346 			break;
347 		}
348 		ret += err;
349 		offset += this_chunk;
350 		nr_to_read -= this_chunk;
351 	}
352 	return ret;
353 }
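
/*
 * For example (a sketch, assuming 4 KiB pages): this_chunk is 512 pages,
 * so a forced 3 MB readahead is issued as two calls to
 * __do_page_cache_readahead(), one of 512 pages and one of 256 pages.
 */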
354 
355 /*
356  * Check how effective readahead is being.  If the amount of started IO is
357  * less than expected then the file is partly or fully in pagecache and
358  * readahead isn't helping.
359  *
360  */
361 static inline int check_ra_success(struct file_ra_state *ra,
362 			unsigned long nr_to_read, unsigned long actual)
363 {
364 	if (actual == 0) {
365 		ra->cache_hit += nr_to_read;
366 		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
367 			ra_off(ra);
368 			ra->flags |= RA_FLAG_INCACHE;
369 			return 0;
370 		}
371 	} else {
372 		ra->cache_hit = 0;
373 	}
374 	return 1;
375 }
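
/*
 * For example, when the file is already fully cached every call to
 * __do_page_cache_readahead() returns 0, so cache_hit keeps growing by
 * nr_to_read; once it reaches VM_MAX_CACHE_HIT readahead is switched off
 * and RA_FLAG_INCACHE set, until handle_ra_miss() clears it again.
 */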
376 
377 /*
378  * This version skips the IO if the queue is read-congested, and will tell the
379  * block layer to abandon the readahead if request allocation would block.
380  *
381  * force_page_cache_readahead() will ignore queue congestion and will block on
382  * request queues.
383  */
384 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
385 			pgoff_t offset, unsigned long nr_to_read)
386 {
387 	if (bdi_read_congested(mapping->backing_dev_info))
388 		return -1;
389 
390 	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
391 }
392 
393 /*
394  * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
395  * is set, wait until the read completes.  Otherwise attempt to read without
396  * blocking.
397  * Returns 1 ('success') if the read succeeded without switching off
398  * readahead mode.  Otherwise returns 0 (failure).
399  */
400 static int
401 blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
402 			pgoff_t offset, unsigned long nr_to_read,
403 			struct file_ra_state *ra, int block)
404 {
405 	int actual;
406 
407 	if (!block && bdi_read_congested(mapping->backing_dev_info))
408 		return 0;
409 
410 	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
411 
412 	return check_ra_success(ra, nr_to_read, actual);
413 }
414 
415 static int make_ahead_window(struct address_space *mapping, struct file *filp,
416 				struct file_ra_state *ra, int force)
417 {
418 	int block, ret;
419 
420 	ra->ahead_size = get_next_ra_size(ra);
421 	ra->ahead_start = ra->start + ra->size;
422 
423 	block = force || (ra->prev_page >= ra->ahead_start);
424 	ret = blockable_page_cache_readahead(mapping, filp,
425 			ra->ahead_start, ra->ahead_size, ra, block);
426 
427 	if (!ret && !force) {
428 		/* A read failure in blocking mode implies the pages are
429 		 * all cached, so we can safely assume we have taken
430 		 * care of all the pages requested in this call.
431 		 * A read failure in non-blocking mode implies we are
432 		 * reading more pages than requested in this call, so
433 		 * we can safely assume we have taken care of all the
434 		 * pages requested in this call.
435 		 *
436 		 * Just reset the ahead window in case we failed due to
437 		 * congestion.  The ahead window will be closed anyway
438 		 * in case we failed due to excessive page cache hits.
439 		 */
440 		reset_ahead_window(ra);
441 	}
442 
443 	return ret;
444 }
445 
446 /**
447  * page_cache_readahead - generic adaptive readahead
448  * @mapping: address_space which holds the pagecache and I/O vectors
449  * @ra: file_ra_state which holds the readahead state
450  * @filp: passed on to ->readpage() and ->readpages()
451  * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
452  * @req_size: hint: total size of the read which the caller is performing in
453  *            PAGE_CACHE_SIZE units
454  *
455  * page_cache_readahead() is the main function.  It performs the adaptive
456  * readahead window size management and submits the readahead I/O.
457  *
458  * Note that @filp is purely used for passing on to the ->readpage[s]()
459  * handler: it may refer to a different file from @mapping (so we may not use
460  * @filp->f_mapping or @filp->f_dentry->d_inode here).
461  * Also, @ra may not be equal to &@filp->f_ra.
462  *
463  */
464 unsigned long
465 page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
466 		     struct file *filp, pgoff_t offset, unsigned long req_size)
467 {
468 	unsigned long max, newsize;
469 	int sequential;
470 
471 	/*
472 	 * If this request begins at the page we inspected last time, skip it to
473 	 * avoid extra work and bogusly perturbing the window expansion logic.
474 	 */
475 	if (offset == ra->prev_page && --req_size)
476 		++offset;
477 
478 	/* Note that prev_page == -1 if it is a first read */
479 	sequential = (offset == ra->prev_page + 1);
480 	ra->prev_page = offset;
481 
482 	max = get_max_readahead(ra);
483 	newsize = min(req_size, max);
484 
485 	/* No readahead or sub-page sized read or file already in cache */
486 	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
487 		goto out;
488 
489 	ra->prev_page += newsize - 1;
490 
491 	/*
492 	 * Special case - first read at start of file, or the first sequential
493 	 * access after readahead was switched off.  We'll assume it's a
494 	 * whole-file read and grow the window fast.
495 	 */
496 	if (sequential && ra->size == 0) {
497 		ra->size = get_init_ra_size(newsize, max);
498 		ra->start = offset;
499 		if (!blockable_page_cache_readahead(mapping, filp, offset,
500 							 ra->size, ra, 1))
501 			goto out;
502 
503 		/*
504 		 * If the request size is larger than our max readahead, we
505 		 * at least want to be sure that we get 2 IOs in flight and
506 		 * we know that we will definitely need the new I/O.
507 		 * Once we do this, subsequent calls should be able to overlap
508 		 * IOs, thus preventing stalls.  So issue the ahead window
509 		 * immediately.
510 		 */
511 		if (req_size >= max)
512 			make_ahead_window(mapping, filp, ra, 1);
513 
514 		goto out;
515 	}
516 
517 	/*
518 	 * Now handle the random case:
519 	 * partial page reads and first access were handled above,
520 	 * so anything that is not the next sequential page is a random read.
521 	 */
522 	if (!sequential) {
523 		ra_off(ra);
524 		blockable_page_cache_readahead(mapping, filp, offset,
525 				 newsize, ra, 1);
526 		goto out;
527 	}
528 
529 	/*
530 	 * If we get here we are doing sequential IO and this was not the first
531 	 * occurrence (i.e. we have an existing window)
532 	 */
533 	if (ra->ahead_start == 0) {	 /* no ahead window yet */
534 		if (!make_ahead_window(mapping, filp, ra, 0))
535 			goto recheck;
536 	}
537 
538 	/*
539 	 * Already have an ahead window, check if we crossed into it.
540 	 * If so, shift windows and issue a new ahead window.
541 	 * Only return the #pages that are in the current window, so that
542 	 * we get called back on the first page of the ahead window which
543 	 * will allow us to submit more IO.
544 	 */
545 	if (ra->prev_page >= ra->ahead_start) {
546 		ra->start = ra->ahead_start;
547 		ra->size = ra->ahead_size;
548 		make_ahead_window(mapping, filp, ra, 0);
549 recheck:
550 		/* prev_page shouldn't overrun the ahead window */
551 		ra->prev_page = min(ra->prev_page,
552 			ra->ahead_start + ra->ahead_size - 1);
553 	}
554 
555 out:
556 	return ra->prev_page + 1;
557 }
558 EXPORT_SYMBOL_GPL(page_cache_readahead);
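
/*
 * A minimal caller sketch (hypothetical, not part of this file; it assumes
 * the usual pagemap includes and is loosely modelled on the pattern used
 * by do_generic_mapping_read() in mm/filemap.c): readahead is told about
 * every read request before the page cache lookup, and handle_ra_miss()
 * is called when a page that should have been read ahead has gone missing.
 */
static void foo_read_sequentially(struct address_space *mapping,
				  struct file *filp, struct file_ra_state *ra,
				  pgoff_t index, unsigned long nr_pages)
{
	while (nr_pages--) {
		struct page *page;

		/* Let readahead see the request and grow/shift its windows. */
		page_cache_readahead(mapping, ra, filp, index, 1);

		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * The page was read ahead but reclaimed before we
			 * got to it (readahead thrashing): shrink the next
			 * window, then fall back to a synchronous
			 * ->readpage() (omitted in this sketch).
			 */
			handle_ra_miss(mapping, ra, index);
			break;
		}

		/* ... wait for the page to become uptodate, copy data out ... */
		page_cache_release(page);
		index++;
	}
}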
559 
560 /*
561  * handle_ra_miss() is called when it is known that a page which should have
562  * been present in the pagecache (we just did some readahead there) was in fact
563  * not found.  This will happen if it was evicted by the VM (readahead
564  * thrashing).
565  *
566  * Turn on the cache miss flag in the RA struct; this will cause the RA code
567  * to reduce the RA size on the next read.
568  */
569 void handle_ra_miss(struct address_space *mapping,
570 		struct file_ra_state *ra, pgoff_t offset)
571 {
572 	ra->flags |= RA_FLAG_MISS;
573 	ra->flags &= ~RA_FLAG_INCACHE;
574 	ra->cache_hit = 0;
575 }
576 
577 /*
578  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
579  * sensible upper limit.
580  */
581 unsigned long max_sane_readahead(unsigned long nr)
582 {
583 	unsigned long active;
584 	unsigned long inactive;
585 	unsigned long free;
586 
587 	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
588 	return min(nr, (inactive + free) / 2);
589 }
590