/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_page = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}

static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
	return;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it relative to the maximum (x4 for small requests, x2 for
 * medium ones, capped at max).  For a 128k (32 page) max readahead this
 * gives a 16k initial window for 1-2 page reads, 32k for 3-4 pages, 64k for
 * 5-8 pages, and the full 128k for anything larger.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}

/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
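/*
 * Worked example of the sizing logic above (illustrative numbers only,
 * assuming 4k pages, VM_MAX_READAHEAD == 128k and VM_MIN_READAHEAD == 16k,
 * i.e. max == 32 pages and min == 4 pages):
 *
 *	get_init_ra_size(4, 32) == 8		a 4-page first read gets a
 *						32k initial window
 *	get_next_ra_size(): 8 -> 16 -> 32	doubles per submit once
 *						cur >= max / 16, then pins
 *						at max
 *	after RA_FLAG_MISS with cur == 32:	max(32 - 2, 4) == 30 pages
 */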
/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *	pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			put_pages_list(pages);
			break;
		}
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}
EXPORT_SYMBOL(read_cache_pages);

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}
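/*
 * A minimal usage sketch for read_cache_pages(), in the style of a network
 * filesystem's ->readpages().  All myfs_* names are invented for
 * illustration and are not part of any kernel API.  The caller receives a
 * list of pages with only ->index set; read_cache_pages() handles
 * page-cache insertion and LRU bookkeeping, and the filler handles one page
 * at a time.
 */
#if 0
static int myfs_fill_page(void *data, struct page *page)
{
	struct myfs_server *server = data;

	/* Start (or perform) the read of this single page; the page is
	 * locked on entry and is unlocked when the read completes. */
	return myfs_read_page(server, page);
}

static int myfs_readpages(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, myfs_fill_page,
				myfs_server(mapping->host));
}
#endif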
/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *		Together, these form the "current window".
 * prev_page:	The page which the readahead algorithm most-recently inspected.
 *		It is mainly used to detect sequential file reading.
 *		If page_cache_readahead sees that it is again being called for
 *		a page which it just looked at, it can return immediately
 *		without making any state changes.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_page is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So we submit a new
 * batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *  ----|----------------|----------------|-----
 *      ^start           ^start+size
 *                       ^ahead_start     ^ahead_start+ahead_size
 *
 *        ^ When this page is read, we submit I/O for the
 *          ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which
 * is the next sequential page.  Ahead window calculations are done only when
 * it is time to submit a new IO.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random IO will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * This function is to be called for every read request, rather than when
 * it is time to perform readahead.  It is called only once for the entire I/O
 * regardless of size unless readahead is unable to start enough I/O to
 * satisfy the request (I/O request > max_readahead).
 */

/*
 * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
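/*
 * Illustrative numbers for the EOF clipping above (assuming 4k pages): for
 * a 10000-byte file, isize - 1 == 9999, so end_index == 9999 >> 12 == 2,
 * and a request for 8 pages starting at offset 0 allocates and reads only
 * pages 0..2.  Pages already present in the radix tree are skipped, so the
 * return value counts only the I/O actually started - check_ra_success()
 * below relies on this to detect a file which is already in pagecache.
 */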
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
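/*
 * Example of the chunking above (assuming 4k pages, so one chunk is
 * 2M == 512 pages): a 6M request, nr_to_read == 1536, is issued as three
 * 512-page calls to __do_page_cache_readahead(), which bounds the number
 * of pages pinned in the page pool at any one time.
 */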
/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait until the read completes.  Otherwise attempt to read without
 * blocking.
 * Returns 1 meaning 'success' if the read is successful without switching off
 * readahead mode.  Otherwise return failure.
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}

static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_page >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies the pages are
		 * all cached, so we can safely assume we have taken care
		 * of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than were requested in this call,
		 * so we can likewise safely assume we have taken care of
		 * all the pages requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will be closed anyway
		 * if we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *	PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * We avoid doing extra work and bogusly perturbing the readahead
	 * window expansion logic.
	 */
	if (offset == ra->prev_page && --req_size)
		++offset;

	/* Note that prev_page == -1 if it is a first read */
	sequential = (offset == ra->prev_page + 1);
	ra->prev_page = offset;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_page += newsize - 1;

	/*
	 * Special case - first read at start of file.  We'll assume it's
	 * a whole-file read and grow the window fast.  Or detect first
	 * sequential access.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to overlap
		 * IOs, thus preventing stalls.  So issue the ahead window
		 * immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above,
	 * so this must be the next page otherwise it is random
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
						newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the first
	 * occurrence (ie we have an existing window)
	 */
	if (ra->ahead_start == 0) {	/* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_page >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_page shouldn't overrun the ahead window */
		ra->prev_page = min(ra->prev_page,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_page + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);
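/*
 * A worked trace of the state machine above, for a fresh fd reading
 * sequentially in 4-page requests with max == 32 (illustrative only):
 *
 *	read of pages 0-3:	first read at start of file, so
 *				get_init_ra_size(4, 32) == 8: the current
 *				window becomes pages 0-7, read with
 *				blocking I/O.
 *	read of pages 4-7:	sequential, no ahead window yet, so
 *				make_ahead_window() sets the ahead window
 *				to pages 8-23 (get_next_ra_size() doubles
 *				8 to 16) and submits it non-blocking.
 *	read crossing page 8:	prev_page >= ahead_start, so the ahead
 *				window becomes the current window (start 8,
 *				size 16) and a new 32-page ahead window is
 *				submitted at page 24.
 *
 * A single random read in between would call ra_off() and restart this
 * sequence at the next sequential access.
 */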
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in
 * fact not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
	return min(nr, (inactive + free) / 2);
}
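/*
 * Illustrative numbers for max_sane_readahead(): it caps the request at
 * half of the node's inactive + free pages.  With 3000 inactive and 1000
 * free pages, max_sane_readahead(512) == min(512, 2000) == 512; on a
 * memory-tight node with only 600 such pages it would be clipped to 300,
 * preventing readahead from evicting the very pages it just read.
 */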