xref: /linux/mm/readahead.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
1 /*
2  * mm/readahead.c - address_space-level file readahead.
3  *
4  * Copyright (C) 2002, Linus Torvalds
5  *
6  * 09Apr2002	akpm@zip.com.au
7  *		Initial version.
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/fs.h>
12 #include <linux/mm.h>
13 #include <linux/module.h>
14 #include <linux/blkdev.h>
15 #include <linux/backing-dev.h>
16 #include <linux/task_io_accounting_ops.h>
17 #include <linux/pagevec.h>
18 #include <linux/pagemap.h>
19 
20 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
21 {
22 }
23 EXPORT_SYMBOL(default_unplug_io_fn);
24 
25 /*
26  * Convenient macros for min/max read-ahead pages.
27  * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
28  * The latter is necessary for systems with a large page size (e.g. 64k).
29  */
30 #define MAX_RA_PAGES	(VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
31 #define MIN_RA_PAGES	DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
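
/*
 * Assuming the usual VM_MAX_READAHEAD of 128kB and VM_MIN_READAHEAD of 16kB
 * from include/linux/mm.h: a 4k page system gets MAX_RA_PAGES = 32 and
 * MIN_RA_PAGES = 4, while a 64k page system gets MAX_RA_PAGES = 2 and
 * MIN_RA_PAGES = 1 (16k rounded up to one page).
 */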
32 
33 struct backing_dev_info default_backing_dev_info = {
34 	.ra_pages	= MAX_RA_PAGES,
35 	.state		= 0,
36 	.capabilities	= BDI_CAP_MAP_COPY,
37 	.unplug_io_fn	= default_unplug_io_fn,
38 };
39 EXPORT_SYMBOL_GPL(default_backing_dev_info);
40 
41 /*
42  * Initialise a struct file's readahead state.  Assumes that the caller has
43  * memset *ra to zero.
44  */
45 void
46 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
47 {
48 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
49 	ra->prev_index = -1;
50 }
51 EXPORT_SYMBOL_GPL(file_ra_state_init);
52 
53 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
54 
55 /**
56  * read_cache_pages - populate an address space with some pages & start reads against them
57  * @mapping: the address_space
58  * @pages: The address of a list_head which contains the target pages.  These
59  *   pages have their ->index populated and are otherwise uninitialised.
60  * @filler: callback routine for filling a single page.
61  * @data: private data for the callback routine.
62  *
63  * Hides the details of the LRU cache etc. from the filesystems.
64  */
65 int read_cache_pages(struct address_space *mapping, struct list_head *pages,
66 			int (*filler)(void *, struct page *), void *data)
67 {
68 	struct page *page;
69 	struct pagevec lru_pvec;
70 	int ret = 0;
71 
72 	pagevec_init(&lru_pvec, 0);
73 
74 	while (!list_empty(pages)) {
75 		page = list_to_page(pages);
76 		list_del(&page->lru);
77 		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
78 			page_cache_release(page);
79 			continue;
80 		}
81 		ret = filler(data, page);
82 		if (!pagevec_add(&lru_pvec, page))
83 			__pagevec_lru_add(&lru_pvec);
84 		if (ret) {
85 			put_pages_list(pages);
86 			break;
87 		}
88 		task_io_account_read(PAGE_CACHE_SIZE);
89 	}
90 	pagevec_lru_add(&lru_pvec);
91 	return ret;
92 }
93 
94 EXPORT_SYMBOL(read_cache_pages);
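
/*
 * A minimal usage sketch: a filesystem could implement its ->readpages()
 * on top of read_cache_pages() with a filler that simply forwards each
 * page to its ->readpage().  The example_* names below are illustrative
 * only, not an interface defined elsewhere in the tree.
 */
static int example_filler(void *data, struct page *page)
{
	struct file *filp = data;

	/* the page was locked by add_to_page_cache(); ->readpage() unlocks it */
	return filp->f_mapping->a_ops->readpage(filp, page);
}

static int example_readpages(struct file *filp, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, example_filler, filp);
}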
95 
96 static int read_pages(struct address_space *mapping, struct file *filp,
97 		struct list_head *pages, unsigned nr_pages)
98 {
99 	unsigned page_idx;
100 	struct pagevec lru_pvec;
101 	int ret;
102 
103 	if (mapping->a_ops->readpages) {
104 		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
105 		/* Clean up the remaining pages */
106 		put_pages_list(pages);
107 		goto out;
108 	}
109 
110 	pagevec_init(&lru_pvec, 0);
111 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
112 		struct page *page = list_to_page(pages);
113 		list_del(&page->lru);
114 		if (!add_to_page_cache(page, mapping,
115 					page->index, GFP_KERNEL)) {
116 			mapping->a_ops->readpage(filp, page);
117 			if (!pagevec_add(&lru_pvec, page))
118 				__pagevec_lru_add(&lru_pvec);
119 		} else
120 			page_cache_release(page);
121 	}
122 	pagevec_lru_add(&lru_pvec);
123 	ret = 0;
124 out:
125 	return ret;
126 }
127 
128 /*
129  * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
130  * the pages first, then submits them all for I/O. This avoids the very bad
131  * behaviour which would occur if page allocations were causing VM writeback.
132  * We really don't want to intermingle reads and writes like that.
133  *
134  * Returns the number of pages requested, or the maximum amount of I/O allowed.
135  *
136  * do_page_cache_readahead() returns -1 if it encountered request queue
137  * congestion.
138  */
139 static int
140 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
141 			pgoff_t offset, unsigned long nr_to_read,
142 			unsigned long lookahead_size)
143 {
144 	struct inode *inode = mapping->host;
145 	struct page *page;
146 	unsigned long end_index;	/* The last page we want to read */
147 	LIST_HEAD(page_pool);
148 	int page_idx;
149 	int ret = 0;
150 	loff_t isize = i_size_read(inode);
151 
152 	if (isize == 0)
153 		goto out;
154 
155 	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
156 
157 	/*
158 	 * Preallocate as many pages as we will need.
159 	 */
160 	read_lock_irq(&mapping->tree_lock);
161 	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
162 		pgoff_t page_offset = offset + page_idx;
163 
164 		if (page_offset > end_index)
165 			break;
166 
167 		page = radix_tree_lookup(&mapping->page_tree, page_offset);
168 		if (page)
169 			continue;
170 
171 		read_unlock_irq(&mapping->tree_lock);
172 		page = page_cache_alloc_cold(mapping);
173 		read_lock_irq(&mapping->tree_lock);
174 		if (!page)
175 			break;
176 		page->index = page_offset;
177 		list_add(&page->lru, &page_pool);
178 		if (page_idx == nr_to_read - lookahead_size)
179 			SetPageReadahead(page);
180 		ret++;
181 	}
182 	read_unlock_irq(&mapping->tree_lock);
183 
184 	/*
185 	 * Now start the IO.  We ignore I/O errors - if the page is not
186 	 * uptodate then the caller will launch readpage again, and
187 	 * will then handle the error.
188 	 */
189 	if (ret)
190 		read_pages(mapping, filp, &page_pool, ret);
191 	BUG_ON(!list_empty(&page_pool));
192 out:
193 	return ret;
194 }
195 
196 /*
197  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
198  * memory at once.
199  */
200 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
201 		pgoff_t offset, unsigned long nr_to_read)
202 {
203 	int ret = 0;
204 
205 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
206 		return -EINVAL;
207 
208 	while (nr_to_read) {
209 		int err;
210 
211 		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
212 
213 		if (this_chunk > nr_to_read)
214 			this_chunk = nr_to_read;
215 		err = __do_page_cache_readahead(mapping, filp,
216 						offset, this_chunk, 0);
217 		if (err < 0) {
218 			ret = err;
219 			break;
220 		}
221 		ret += err;
222 		offset += this_chunk;
223 		nr_to_read -= this_chunk;
224 	}
225 	return ret;
226 }
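
/*
 * For example, with a 4k PAGE_CACHE_SIZE each chunk above is 512 pages, so
 * an 8MB (2048 page) request is submitted as four separate 2MB readaheads
 * instead of pinning all 2048 pages at once.
 */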
227 
228 /*
229  * This version skips the IO if the queue is read-congested, and will tell the
230  * block layer to abandon the readahead if request allocation would block.
231  *
232  * force_page_cache_readahead() will ignore queue congestion and will block on
233  * request queues.
234  */
235 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
236 			pgoff_t offset, unsigned long nr_to_read)
237 {
238 	if (bdi_read_congested(mapping->backing_dev_info))
239 		return -1;
240 
241 	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
242 }
243 
244 /*
245  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
246  * sensible upper limit.
247  */
248 unsigned long max_sane_readahead(unsigned long nr)
249 {
250 	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
251 		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
252 }
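
/*
 * E.g. if the local node currently has 500 inactive and 300 free pages,
 * readahead is capped at (500 + 300) / 2 = 400 pages, whatever the caller
 * asked for.
 */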
253 
254 /*
255  * Submit IO for the read-ahead request in file_ra_state.
256  */
257 static unsigned long ra_submit(struct file_ra_state *ra,
258 		       struct address_space *mapping, struct file *filp)
259 {
260 	int actual;
261 
262 	actual = __do_page_cache_readahead(mapping, filp,
263 					ra->start, ra->size, ra->async_size);
264 
265 	return actual;
266 }
267 
268 /*
269  * Set the initial window size: round the request up to the next power of 2,
270  * then scale it up - x4 for small requests, x2 for medium ones - and clamp
271  * the result to max; larger requests get the full max window straight away.
272  * A worked example for a 128k (32 page) max readahead follows the function.
273  */
274 static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
275 {
276 	unsigned long newsize = roundup_pow_of_two(size);
277 
278 	if (newsize <= max / 32)
279 		newsize = newsize * 4;
280 	else if (newsize <= max / 4)
281 		newsize = newsize * 2;
282 	else
283 		newsize = max;
284 
285 	return newsize;
286 }
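
/*
 * Worked example, assuming a 4k PAGE_CACHE_SIZE and a 128k (32 page) max:
 * a 1-2 page request gets a 4 page (16k) initial window, 3-4 pages get
 * 8 pages (32k), 5-8 pages get 16 pages (64k), and anything larger gets
 * the full 32 pages (128k).
 */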
287 
288 /*
289  *  Get the previous window size, ramp it up, and
290  *  return it as the new window size.
291  */
292 static unsigned long get_next_ra_size(struct file_ra_state *ra,
293 						unsigned long max)
294 {
295 	unsigned long cur = ra->size;
296 	unsigned long newsize;
297 
298 	if (cur < max / 16)
299 		newsize = 4 * cur;
300 	else
301 		newsize = 2 * cur;
302 
303 	return min(newsize, max);
304 }
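
/*
 * Worked example with the same 32 page max: only a window below max/16
 * (i.e. a single page) is quadrupled; after that the window doubles on
 * each hit, e.g. 4 -> 8 -> 16 -> 32, and then stays clamped at 32 pages.
 */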
305 
306 /*
307  * On-demand readahead design.
308  *
309  * The fields in struct file_ra_state represent the most-recently-executed
310  * readahead attempt:
311  *
312  *                        |<----- async_size ---------|
313  *     |------------------- size -------------------->|
314  *     |==================#===========================|
315  *     ^start             ^page marked with PG_readahead
316  *
317  * To overlap application thinking time and disk I/O time, we do
318  * `readahead pipelining': Do not wait until the application consumed all
319  * readahead pages and stalled on the missing page at readahead_index;
320  * Instead, submit an asynchronous readahead I/O as soon as there are
321  * only async_size pages left in the readahead window. Normally async_size
322  * will be equal to size, for maximum pipelining.
323  *
324  * In interleaved sequential reads, concurrent streams on the same fd can
325  * be invalidating each other's readahead state. So we flag the new readahead
326  * page at (start+size-async_size) with PG_readahead, and use it as readahead
327  * indicator. The flag won't be set on already cached pages, to avoid the
328  * readahead-for-nothing fuss, saving pointless page cache lookups.
329  *
330  * prev_index tracks the last visited page in the _previous_ read request.
331  * It should be maintained by the caller, and will be used for detecting
332  * small random reads. Note that the readahead algorithm checks loosely
333  * for sequential patterns. Hence interleaved reads might be served as
334  * sequential ones.
335  *
336  * There is a special-case: if the first page which the application tries to
337  * read happens to be the first page of the file, it is assumed that a linear
338  * read is about to happen and the window is immediately set to the initial size
339  * based on I/O request size and the max_readahead.
340  *
341  * The code ramps up the readahead size aggressively at first, but slows down
342  * as it approaches max_readahead.
343  */
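
/*
 * A worked example, assuming a 32 page max and no pages cached yet: an 8
 * page read at offset 0 misses the cache, so the sync path sets up
 * start = 0, size = 16, async_size = 8 and marks page 8 with PG_readahead.
 * When the application later reaches page 8, the async path sees the marker
 * and advances the window to start = 16, size = 32, async_size = 32,
 * submitting the next 32 pages while the first batch is still being
 * consumed.
 */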
344 
345 /*
346  * A minimal readahead algorithm for trivial sequential/random reads.
347  */
348 static unsigned long
349 ondemand_readahead(struct address_space *mapping,
350 		   struct file_ra_state *ra, struct file *filp,
351 		   bool hit_readahead_marker, pgoff_t offset,
352 		   unsigned long req_size)
353 {
354 	unsigned long max;	/* max readahead pages */
355 	int sequential;
356 
357 	max = ra->ra_pages;
358 	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
359 
360 	/*
361 	 * It's the expected callback offset, assume sequential access.
362 	 * Ramp up sizes, and push forward the readahead window.
363 	 */
364 	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
365 			offset == (ra->start + ra->size))) {
366 		ra->start += ra->size;
367 		ra->size = get_next_ra_size(ra, max);
368 		ra->async_size = ra->size;
369 		goto readit;
370 	}
371 
372 	/*
373 	 * Standalone, small read.
374 	 * Read as is, and do not pollute the readahead state.
375 	 */
376 	if (!hit_readahead_marker && !sequential) {
377 		return __do_page_cache_readahead(mapping, filp,
378 						offset, req_size, 0);
379 	}
380 
381 	/*
382 	 * It may be one of
383 	 * 	- first read on start of file
384 	 * 	- sequential cache miss
385 	 * 	- oversize random read
386 	 * Start readahead for it.
387 	 */
388 	ra->start = offset;
389 	ra->size = get_init_ra_size(req_size, max);
390 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
391 
392 	/*
393 	 * Hit on a marked page without valid readahead state.
394 	 * E.g. interleaved reads.
395 	 * Not knowing its readahead pos/size, bet on the minimal possible one.
396 	 */
397 	if (hit_readahead_marker) {
398 		ra->start++;
399 		ra->size = get_next_ra_size(ra, max);
400 	}
401 
402 readit:
403 	return ra_submit(ra, mapping, filp);
404 }
405 
406 /**
407  * page_cache_sync_readahead - generic file readahead
408  * @mapping: address_space which holds the pagecache and I/O vectors
409  * @ra: file_ra_state which holds the readahead state
410  * @filp: passed on to ->readpage() and ->readpages()
411  * @offset: start offset into @mapping, in pagecache page-sized units
412  * @req_size: hint: total size of the read which the caller is performing in
413  *            pagecache pages
414  *
415  * page_cache_sync_readahead() should be called when a cache miss happened:
416  * it will submit the read.  The readahead logic may decide to piggyback more
417  * pages onto the read request if access patterns suggest it will improve
418  * performance.
419  */
420 void page_cache_sync_readahead(struct address_space *mapping,
421 			       struct file_ra_state *ra, struct file *filp,
422 			       pgoff_t offset, unsigned long req_size)
423 {
424 	/* no read-ahead */
425 	if (!ra->ra_pages)
426 		return;
427 
428 	/* do read-ahead */
429 	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
430 }
431 EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
432 
433 /**
434  * page_cache_async_readahead - file readahead for marked pages
435  * @mapping: address_space which holds the pagecache and I/O vectors
436  * @ra: file_ra_state which holds the readahead state
437  * @filp: passed on to ->readpage() and ->readpages()
438  * @page: the page at @offset which has the PG_readahead flag set
439  * @offset: start offset into @mapping, in pagecache page-sized units
440  * @req_size: hint: total size of the read which the caller is performing in
441  *            pagecache pages
442  *
443  * page_cache_async_readahead() should be called when a page is used which
444  * has the PG_readahead flag: this is a marker to suggest that the application
445  * has used up enough of the readahead window that we should start pulling in
446  * more pages. */
447 void
448 page_cache_async_readahead(struct address_space *mapping,
449 			   struct file_ra_state *ra, struct file *filp,
450 			   struct page *page, pgoff_t offset,
451 			   unsigned long req_size)
452 {
453 	/* no read-ahead */
454 	if (!ra->ra_pages)
455 		return;
456 
457 	/*
458 	 * Same bit is used for PG_readahead and PG_reclaim.
459 	 */
460 	if (PageWriteback(page))
461 		return;
462 
463 	ClearPageReadahead(page);
464 
465 	/*
466 	 * Defer asynchronous read-ahead on IO congestion.
467 	 */
468 	if (bdi_read_congested(mapping->backing_dev_info))
469 		return;
470 
471 	/* do read-ahead */
472 	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
473 }
474 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
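
/*
 * An illustrative caller pattern, loosely following do_generic_mapping_read()
 * in mm/filemap.c: issue a synchronous readahead on a cache miss, and an
 * asynchronous one whenever a PG_readahead-marked page is reached.  The
 * example_* name is hypothetical.
 */
static void example_readahead_hints(struct address_space *mapping,
				    struct file_ra_state *ra, struct file *filp,
				    pgoff_t index, unsigned long req_size)
{
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		/* cache miss: read the wanted pages plus some readahead */
		page_cache_sync_readahead(mapping, ra, filp, index, req_size);
		return;
	}
	if (PageReadahead(page))
		/* marker hit: top up the readahead window in the background */
		page_cache_async_readahead(mapping, ra, filp, page,
					   index, req_size);
	page_cache_release(page);
}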
475