/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
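	/*
	 * Drop the reference held via the caller's page list; the page was
	 * never added to the page cache, so this normally frees it.
	 */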
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
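		/* Charge one page worth of read I/O to the calling task. */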
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
			mapping->a_ops->readpage(filp, page);
		put_page(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages it actually allocated and submitted for
 * readahead, which may be less than the number requested.
 */
unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	unsigned int nr_pages = 0;
	loff_t isize = i_size_read(inode);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
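	 * Pages that are already cached are skipped, and hitting one flushes
	 * the batch collected so far, so that read_pages() always sees a
	 * contiguous run of new pages.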
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->i_pages, page_offset);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			/*
			 * Page already present?  Kick off the current batch of
			 * contiguous pages before continuing with the next
			 * batch.
			 */
			if (nr_pages)
				read_pages(mapping, filp, &page_pool, nr_pages,
						gfp_mask);
			nr_pages = 0;
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (nr_pages)
		read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
	BUG_ON(!list_empty(&page_pool));
out:
	return nr_pages;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			       pgoff_t offset, unsigned long nr_to_read)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	struct file_ra_state *ra = &filp->f_ra;
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min(nr_to_read, max_pages);
	while (nr_to_read) {
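		/*
		 * Each iteration issues at most 2MB of readahead; with 4KB
		 * pages, for example, that is 512 pages per call to
		 * __do_page_cache_readahead().
		 */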
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then quadruple it for small requests (<= max/32), double it for medium
 * ones (<= max/4), and clamp it to max otherwise.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
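 *
 * For example, when async_size equals size, the PG_readahead marker sits
 * on the first page of the freshly-read window, so the moment the
 * application touches that page the next window is submitted while the
 * rest of the current one is still unread.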
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max; this count
 * is a conservative estimation of
 *   - length of the sequential read sequence, or
 *   - thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_hole(mapping, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context-based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of a long-run stream (or whole-file read),
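	 * so the size estimate below is doubled to ramp up more aggressively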
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages = ra->ra_pages;
	pgoff_t prev_offset;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
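	 * The length of the cached run starting at offset (up to the next
	 * hole in the page cache) serves as the estimate of that old
	 * async_size.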
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_hole(mapping, offset + 1, max_pages);
		rcu_read_unlock();

		if (!start || start - offset > max_pages)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
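	 * (This is the case when offset == ra->start and the whole window is
	 * async, i.e. the marker sits on the very first page to be read.)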
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max_pages);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
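 *
 * Unlike page_cache_sync_readahead(), which is called on a cache miss, this
 * is called while the current access can still be satisfied from the page
 * cache, so the readahead it submits proceeds asynchronously.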
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(mapping->host))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops)
		return -EINVAL;

	/*
	 * Readahead doesn't make sense for DAX inodes, but we don't want it
	 * to report a failure either.  Instead, we just return success and
	 * don't do any work.
	 */
	if (dax_mapping(mapping))
		return 0;

	return force_page_cache_readahead(mapping, filp, index, nr);
}

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_READ) {
			struct address_space *mapping = f.file->f_mapping;
			pgoff_t start = offset >> PAGE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, f.file, start, len);
		}
		fdput(f);
	}
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
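/*
 * Example (userspace, illustrative only): hint the kernel to pre-populate
 * the page cache with the first 1MB of a hypothetical file before reading
 * it sequentially.  The readahead() call below ends up in ksys_readahead()
 * above:
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	if (fd >= 0)
 *		readahead(fd, 0, 1 << 20);
 */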