/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

/*
 * Convenient macros for min/max read-ahead pages.
 * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
 * The latter is necessary for systems with large page sizes (i.e. 64k).
 */
#define MAX_RA_PAGES    (VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
#define MIN_RA_PAGES    DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = MAX_RA_PAGES,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
        ra->prev_index = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
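/*
 * Illustrative sketch (assuming a 4k PAGE_CACHE_SIZE and the usual
 * VM_MAX_READAHEAD of 128kB): a caller holding a zeroed file_ra_state
 * would initialise it as below, after which ra_pages holds the bdi
 * default of 32 pages and prev_index is -1:
 *
 *      struct file_ra_state ra;
 *
 *      memset(&ra, 0, sizeof(ra));
 *      file_ra_state_init(&ra, inode->i_mapping);
 */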
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        struct pagevec lru_pvec;
        int ret = 0;

        pagevec_init(&lru_pvec, 0);

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                        page_cache_release(page);
                        continue;
                }
                ret = filler(data, page);
                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);
                if (ret) {
                        put_pages_list(pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
        pagevec_lru_add(&lru_pvec);
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);

static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        struct pagevec lru_pvec;
        int ret;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                        if (!pagevec_add(&lru_pvec, page))
                                __pagevec_lru_add(&lru_pvec);
                } else
                        page_cache_release(page);
        }
        pagevec_lru_add(&lru_pvec);
        ret = 0;
out:
        return ret;
}
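/*
 * Illustrative sketch: a filesystem that can only read one page at a time
 * might build its ->readpages() on read_cache_pages(), passing its
 * single-page reader as the filler (foo_readpage() is hypothetical):
 *
 *      static int foo_fill(void *data, struct page *page)
 *      {
 *              return foo_readpage((struct file *)data, page);
 *      }
 *
 *      static int foo_readpages(struct file *filp,
 *                               struct address_space *mapping,
 *                               struct list_head *pages, unsigned nr_pages)
 *      {
 *              return read_cache_pages(mapping, pages, foo_fill, filp);
 *      }
 */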
/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read,
                        unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        read_lock_irq(&mapping->tree_lock);
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                if (page)
                        continue;

                read_unlock_irq(&mapping->tree_lock);
                page = page_cache_alloc_cold(mapping);
                read_lock_irq(&mapping->tree_lock);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                ret++;
        }
        read_unlock_irq(&mapping->tree_lock);

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}
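/*
 * Worked example of the lookahead marking above (illustrative): a call with
 * offset == 100, nr_to_read == 16 and lookahead_size == 8 allocates pages
 * 100-115 and sets PG_readahead on page 108 (page_idx == 16 - 8), so a
 * reader that gets that far triggers the next asynchronous readahead before
 * the current window runs dry.
 */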
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        while (nr_to_read) {
                int err;

                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk, 0);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}
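/*
 * Worked example (illustrative, assuming 4k pages): this_chunk above is
 * 512 pages, so forcing readahead of 1280 pages (5MB) is submitted as three
 * __do_page_cache_readahead() calls of 512, 512 and 256 pages.
 */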
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read)
{
        if (bdi_read_congested(mapping->backing_dev_info))
                return -1;

        return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
                        struct address_space *mapping, struct file *filp)
{
        unsigned long ra_size;
        unsigned long la_size;
        int actual;

        ra_size = ra_readahead_size(ra);
        la_size = ra_lookahead_size(ra);
        actual = __do_page_cache_readahead(mapping, filp,
                                        ra->ra_index, ra_size, la_size);

        return actual;
}
EXPORT_SYMBOL_GPL(ra_submit);

/*
 * Set the initial readahead window size: round the request up to the next
 * power of two, then scale it up - x4 for small requests, x2 for medium
 * ones, and clamp to max for anything larger.  For a 128k (32 page) max
 * readahead this means a 1-8 page read starts with a 16k-64k window, and
 * reads larger than 8 pages start at the full 128k.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                                unsigned long max)
{
        unsigned long cur = ra->readahead_index - ra->ra_index;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}
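/*
 * Worked example of the two sizing helpers (illustrative), with max == 32
 * pages (128k readahead, 4k pages): a 4-page initial read gets an 8-page
 * window from get_init_ra_size() (roundup_pow_of_two(4) == 4 <= max/4, so
 * it is doubled); each subsequent sequential hit then ramps the window via
 * get_next_ra_size(): 8 -> 16 -> 32, staying capped at max from then on.
 */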
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                    |-------- last readahead window -------->|
 *       |-- application walking here -->|
 * ======#============|==================#=====================|
 *       ^la_index    ^ra_index     ^lookahead_index    ^readahead_index
 *
 * [ra_index, readahead_index) represents the last readahead window.
 *
 * [la_index, lookahead_index] is where the application would be walking (in
 * the common case of cache-cold sequential reads): the last window was
 * established when the application was at la_index, and the next window will
 * be brought in when the application reaches lookahead_index.
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as early as the application
 * reads on the page at lookahead_index.  Normally lookahead_index will be
 * equal to ra_index, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state.  So we flag the new readahead
 * page at lookahead_index with PG_readahead, and use it as a readahead
 * indicator.  The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss and save pointless page cache lookups.
 *
 * prev_index tracks the last visited page in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
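/*
 * Example trace of the pipelining described above (illustrative, max == 32
 * pages): a cache-cold 1-page read at offset 0 sets up window [0, 4) with
 * la_size == 3, so PG_readahead lands on page 1.  When the application
 * reaches page 1, the window is pushed forward to [4, 12) (get_next_ra_size()
 * doubles 4 to 8) and the new lookahead page is marked, letting the next
 * window's I/O overlap with consumption of the current one.
 */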
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   struct page *page, pgoff_t offset,
                   unsigned long req_size)
{
        unsigned long max;      /* max readahead pages */
        pgoff_t ra_index;       /* readahead index */
        unsigned long ra_size;  /* readahead size */
        unsigned long la_size;  /* lookahead size */
        int sequential;

        max = ra->ra_pages;
        sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);

        /*
         * Lookahead/readahead hit, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if (offset && (offset == ra->lookahead_index ||
                       offset == ra->readahead_index)) {
                ra_index = ra->readahead_index;
                ra_size = get_next_ra_size(ra, max);
                la_size = ra_size;
                goto fill_ra;
        }

        /*
         * Standalone, small read.
         * Read as is, and do not pollute the readahead state.
         */
        if (!page && !sequential) {
                return __do_page_cache_readahead(mapping, filp,
                                                 offset, req_size, 0);
        }

        /*
         * It may be one of
         *      - first read on start of file
         *      - sequential cache miss
         *      - oversize random read
         * Start readahead for it.
         */
        ra_index = offset;
        ra_size = get_init_ra_size(req_size, max);
        la_size = ra_size > req_size ? ra_size - req_size : ra_size;

        /*
         * Hit on a lookahead page without valid readahead state.
         * E.g. interleaved reads.
         * Not knowing its readahead pos/size, bet on the minimal possible one.
         */
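        /*
         * (Illustrative: if a second sequential stream hits the first
         * stream's PG_readahead page at, say, offset 100 while ra still
         * tracks the first stream, the window built here starts at page
         * 101 and is capped at min(4 * ra_size, max) pages.)
         */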
        if (page) {
                ra_index++;
                ra_size = min(4 * ra_size, max);
        }

fill_ra:
        ra_set_index(ra, offset, ra_index);
        ra_set_size(ra, ra_size, la_size);

        return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_readahead_ondemand - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset, or NULL if non-present
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead_ondemand() is the entry point of readahead logic.
 * This function should be called when it is time to perform readahead:
 * 1) @page == NULL
 *    A cache miss happened, time for synchronous readahead.
 * 2) @page != NULL && PageReadahead(@page)
 *    A look-ahead hit occurred, time for asynchronous readahead.
 */
unsigned long
page_cache_readahead_ondemand(struct address_space *mapping,
                              struct file_ra_state *ra, struct file *filp,
                              struct page *page, pgoff_t offset,
                              unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return 0;

        if (page) {
                ClearPageReadahead(page);

                /*
                 * Defer asynchronous read-ahead on IO congestion.
                 */
                if (bdi_read_congested(mapping->backing_dev_info))
                        return 0;
        }

        /* do read-ahead */
        return ondemand_readahead(mapping, ra, filp, page,
                                  offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_readahead_ondemand);
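/*
 * Illustrative sketch of the expected call pattern from a read path (the
 * surrounding loop and variable names are indicative only, not an API):
 *
 *      page = find_get_page(mapping, index);
 *      if (page == NULL) {
 *              page_cache_readahead_ondemand(mapping, &ra, filp, NULL,
 *                                            index, last_index - index);
 *              page = find_get_page(mapping, index);
 *      } else if (PageReadahead(page)) {
 *              page_cache_readahead_ondemand(mapping, &ra, filp, page,
 *                                            index, last_index - index);
 *      }
 */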