/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                             struct page *page)
{
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
                do_invalidatepage(page, 0);
                page->mapping = NULL;
                unlock_page(page);
        }
        page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                              struct list_head *pages)
{
        struct page *victim;

        while (!list_empty(pages)) {
                victim = list_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
}
/**
 * read_cache_pages - populate an address space with some pages & start
 * reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                page_cache_release(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);
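/*
 * Illustrative sketch, not part of this file: a minimal synchronous filler
 * for read_cache_pages().  The names example_filler, example_ctx and
 * example_fill_page are hypothetical; a real filesystem would read the page
 * from its backing store here.  Like ->readpage(), the filler is handed a
 * locked page and is responsible for unlocking it when done.
 *
 *      static int example_filler(void *data, struct page *page)
 *      {
 *              struct example_ctx *ctx = data;
 *              int err = example_fill_page(ctx, page);
 *
 *              if (!err)
 *                      SetPageUptodate(page);
 *              unlock_page(page);
 *              return err;
 *      }
 *
 *      err = read_cache_pages(mapping, &pages, example_filler, ctx);
 */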
static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        int ret;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                }
                page_cache_release(page);
        }
        ret = 0;
out:
        return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read,
                        unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                rcu_read_unlock();
                if (page)
                        continue;

                page = page_cache_alloc_cold(mapping);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                ret++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}
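/*
 * Worked example (illustrative, numbers chosen for this sketch):
 * __do_page_cache_readahead(mapping, filp, 100, 16, 8) tries to allocate
 * pages 100..115, skipping any that are already cached, and sets
 * PG_readahead on the page at index 108 (page_idx == nr_to_read -
 * lookahead_size) before submitting the batch.  When the application later
 * reaches page 108, page_cache_async_readahead() can start the next window
 * while 8 pages are still unread.
 */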
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        nr_to_read = max_sane_readahead(nr_to_read);
        while (nr_to_read) {
                int err;

                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk, 0);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}
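/*
 * Worked example (illustrative): with a PAGE_CACHE_SIZE of 4096 bytes,
 * this_chunk is (2 * 1024 * 1024) / 4096 = 512 pages.  A request for 1200
 * pages is therefore issued as three calls of 512, 512 and 176 pages, so
 * the page pool never pins more than 2MB of memory at once.
 */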
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
{
        if (bdi_read_congested(mapping->backing_dev_info))
                return -1;

        return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static unsigned long ra_submit(struct file_ra_state *ra,
                struct address_space *mapping, struct file *filp)
{
        int actual;

        actual = __do_page_cache_readahead(mapping, filp,
                                        ra->start, ra->size, ra->async_size);

        return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - x 4 for small requests, x 2 for medium ones - and clamp
 * the result at max.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}
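/*
 * Worked example (illustrative): for max = 32 pages (128k readahead with 4k
 * pages), a first sequential read of 4 pages gets an initial window of
 * get_init_ra_size(4, 32) = 8: roundup_pow_of_two(4) = 4, which is greater
 * than max / 32 but no more than max / 4, so it is doubled.  Subsequent
 * sequential hits ramp the window via get_next_ra_size(): 8 -> 16 -> 32,
 * after which it stays clamped at max.
 */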
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
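/*
 * Illustrative trace of the pipelining described above, assuming max = 32
 * pages and single-page reads: a miss at page 0 builds a 4-page window
 * [0, 4) with PG_readahead set on page 1; when the application reaches
 * page 1, an asynchronous 8-page window [4, 12) is submitted with page 4
 * marked; reaching page 4 submits [12, 28), and so on, keeping the I/O
 * ahead of consumption.
 */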
/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        unsigned long max = max_sane_readahead(ra->ra_pages);
        pgoff_t prev_offset;
        int sequential;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
                        offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
        sequential = offset - prev_offset <= 1UL || req_size > max;

        /*
         * Standalone, small read.
         * Read as is, and do not pollute the readahead state.
         */
        if (!hit_readahead_marker && !sequential) {
                return __do_page_cache_readahead(mapping, filp,
                                                offset, req_size, 0);
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size.  Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = radix_tree_next_hole(&mapping->page_tree, offset+1, max);
                rcu_read_unlock();

                if (!start || start - offset > max)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * It may be one of
         *      - first read on start of file
         *      - sequential cache miss
         *      - oversize random read
         * Start readahead for it.
         */
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        return ra_submit(ra, mapping, filp);
}
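/*
 * Worked example (illustrative) of the expected-offset branch above: with
 * ra = {start = 100, size = 16, async_size = 16} and max = 32, a read at
 * offset 100 matches start + size - async_size, so the window advances to
 * start = 116 and ramps to size = async_size = 32 before ra_submit().
 */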
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t offset,
                           unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * Same bit is used for PG_readahead and PG_reclaim.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (bdi_read_congested(mapping->backing_dev_info))
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
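/*
 * Usage sketch, simplified from what the generic read path (e.g.
 * do_generic_file_read()) does: on a page cache miss, issue synchronous
 * readahead and retry the lookup; when an already-cached page carries
 * PG_readahead, kick off the next window asynchronously.  req_pages here
 * is a placeholder for the remaining length of the caller's read, in pages.
 *
 *      page = find_get_page(mapping, index);
 *      if (!page) {
 *              page_cache_sync_readahead(mapping, ra, filp,
 *                                        index, req_pages);
 *              page = find_get_page(mapping, index);
 *      } else if (PageReadahead(page)) {
 *              page_cache_async_readahead(mapping, ra, filp,
 *                                         page, index, req_pages);
 *      }
 */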