Lines matching the full-word terms "start", "-" and "up" in mm/readahead.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm/readahead.c - address_space-level file readahead.
17 * folio is present but not up-to-date, readahead will not try to read
18 * it. In that case a simple ->read_folio() will be requested.
29 * contains ->size being the total number of pages, and ->async_size
37 * to be determined: the start of the region to read, the size of the
40 * The start of the region is simply the first page address at or after
46 * this would be less than zero - then zero is used. NOTE THIS
47 * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED
57 * page to the start of the new readahead. In these cases, the size of
65 * larger than the current request, and it is not scaled up, unless it
66 * is at the start of file.
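The clamp-at-zero rule described above reduces to a one-liner; a sketch under the stated assumptions (async_tail_sketch is an illustrative name, not a kernel helper):

static unsigned long async_tail_sketch(unsigned long ra_size,
				       unsigned long requested)
{
	/* async tail = determined size minus the explicit request,
	 * never negative (see the NOTE above about its limits) */
	return ra_size > requested ? ra_size - requested : 0;
}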
68 * In general readahead is accelerated at the start of the file, as
77 * Readahead requests are sent to the filesystem using the ->readahead()
79 * implementation. ->readahead() should normally initiate reads on all
81 * error. The page cache reading code will issue a ->read_folio() request
82 * for any folio which ->readahead() did not read, and only an error
85 * ->readahead() will generally call readahead_folio() repeatedly to get
103 * considered to be important and ->readahead() should not fail them due
113 * ->read_folio() which may be less efficient.
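To make the ->readahead() contract above concrete, here is a minimal hypothetical implementation; myfs_readahead() and myfs_read_folio_sync() are illustrative names, not kernel APIs. Each folio comes back from readahead_folio() locked; it is marked uptodate only on success and always unlocked, leaving failures to the ->read_folio() fallback described above.

static void myfs_readahead(struct readahead_control *rac)
{
	struct folio *folio;

	while ((folio = readahead_folio(rac))) {
		/* hypothetical synchronous read helper */
		if (!myfs_read_folio_sync(rac->file, folio))
			folio_mark_uptodate(folio);
		/* left !uptodate on error; core retries via ->read_folio() */
		folio_unlock(folio);
	}
}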
121 #include <linux/backing-dev.h>
128 #include <linux/blk-cgroup.h>
141 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
142 ra->prev_pos = -1;
148 const struct address_space_operations *aops = rac->mapping->a_ops;
155 if (unlikely(rac->_workingset))
156 psi_memstall_enter(&rac->_pflags);
159 if (aops->readahead) {
160 aops->readahead(rac);
161 /* Clean up the remaining folios. */
170 aops->read_folio(rac->file, folio);
174 if (unlikely(rac->_workingset))
175 psi_memstall_leave(&rac->_pflags);
176 rac->_workingset = false;
187 if (folio && ractl->dropbehind)
194 * page_cache_ra_unbounded - Start unchecked readahead.
197 * @lookahead_size: Where to start the next readahead.
199 * This function is for filesystems to call when they want to start
210 struct address_space *mapping = ractl->mapping;
222 * touch file-backed pages, preventing a deadlock. Most (all?)
241 nr_to_read - lookahead_size,
243 mark = ra_folio_index - index;
245 nr_to_read += readahead_index(ractl) - index;
246 ractl->_index = index;
252 struct folio *folio = xa_load(&mapping->i_pages, index + i);
265 ractl->_index += min_nrpages;
266 i = ractl->_index + ractl->_nr_pages - index;
278 if (ret == -ENOMEM)
281 ractl->_index += min_nrpages;
282 i = ractl->_index + ractl->_nr_pages - index;
287 ractl->_workingset |= folio_test_workingset(folio);
288 ractl->_nr_pages += min_nrpages;
293  * Now start the I/O.  We ignore I/O errors - if the folio is not
312 struct inode *inode = ractl->mapping->host;
320 end_index = (isize - 1) >> PAGE_SHIFT;
324 if (nr_to_read > end_index - index)
325 nr_to_read = end_index - index + 1;
337 struct address_space *mapping = ractl->mapping;
338 struct file_ra_state *ra = ractl->ra;
339 struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
342 if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
347 * be up to the optimal hardware IO size
349 max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
358 nr_to_read -= this_chunk;
366  * 1-2 page = 16k, 3-4 page = 32k, 5-8 page = 64k, > 8 page = 128k initial
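The table above can be reproduced in userspace; the function body below mirrors the mainline get_init_ra_size() logic, assuming 4k pages and the 128k (32 page) default maximum:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* mirrors mainline get_init_ra_size(): quadruple the smallest
 * requests, double medium ones, clamp at max */
static unsigned long init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}

int main(void)
{
	for (unsigned long req = 1; req <= 10; req++)
		printf("req %2lu pages -> window %2lu pages (%luk)\n",
		       req, init_ra_size(req, 32), init_ra_size(req, 32) * 4);
	return 0;
}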
383 * Get the previous window size, ramp it up, and
389 unsigned long cur = ra->size;
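For subsequent windows, the ramp-up referenced above quadruples a small window, doubles a medium one, and clamps at max; a sketch mirroring the mainline get_next_ra_size() rule (the standalone name is illustrative):

static unsigned long next_ra_size_sketch(unsigned long cur, unsigned long max)
{
	if (cur < max / 16)
		return 4 * cur;	/* small window: grow fast */
	if (cur <= max / 2)
		return 2 * cur;	/* medium window: double */
	return max;		/* large window: clamp */
}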
399 * On-demand readahead design.
401 * The fields in struct file_ra_state represent the most-recently-executed
404  *                        |<----- async_size ---------|
405  *     |------------------- size -------------------->|
406  *     |==================#===========================|
407  *     ^start             ^page marked with PG_readahead
418 * page at (start+size-async_size) with PG_readahead, and use it as readahead
420 * readahead-for-nothing fuss, saving pointless page cache lookups.
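On the consumer side the marker is acted on roughly as below; page_cache_async_ra() is the in-kernel entry point (cf. filemap_readahead() in mm/filemap.c), while the surrounding variables are simplified for illustration:

	/* when a read hits the folio carrying the PG_readahead mark,
	 * kick off the next window before the reader blocks on it */
	if (folio_test_readahead(folio))
		page_cache_async_ra(&ractl, folio, req_count);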
428 * There is a special case: if the first page which the application tries to
433 * The code ramps up the readahead size aggressively at first, but slows down as
444 return -ENOMEM;
448 err = filemap_add_folio(ractl->mapping, folio, index, gfp);
454 ractl->_nr_pages += 1UL << order;
455 ractl->_workingset |= folio_test_workingset(folio);
462 struct address_space *mapping = ractl->mapping;
463 pgoff_t start = readahead_index(ractl);
464 pgoff_t index = start;
466 pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
467 pgoff_t mark = index + ra->size - ra->async_size;
471 unsigned int new_order = ra->order;
474 ra->order = 0;
478 limit = min(limit, index + ra->size - 1);
481 new_order = min_t(unsigned int, new_order, ilog2(ra->size));
484 ra->order = new_order;
494 ractl->_index = mapping_align_index(mapping, index);
501 if (index & ((1UL << order) - 1))
504 while (order > min_order && index + (1UL << order) - 1 > limit)
505 order--;
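A worked example of the two reductions above, with illustrative numbers (new_order = 4, min_order = 0; in mainline the misaligned case falls back to order = __ffs(index)):

	/*
	 * index = 20 (10100b): 20 & 15 != 0 -> order 2, a  4-page folio
	 * index = 24 (11000b): 24 & 15 != 0 -> order 3, an 8-page folio
	 * index = 32:          aligned      -> order 4, a 16-page folio
	 * allocation therefore self-aligns until full-order folios fit.
	 */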
525 * ->readahead() may have updated readahead window size so we have to
528 if (ra->size > index - start)
529 do_page_cache_ra(ractl, ra->size - (index - start),
530 ra->async_size);
536 struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
537 unsigned long max_pages = ractl->ra->ra_pages;
541 * be up to the optimal hardware IO size
543 if (req_size > max_pages && bdi->io_pages > max_pages)
544 max_pages = min(req_size, bdi->io_pages);
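Worked example of the bump above, with illustrative numbers:

	/* ra_pages = 32 (128k default), bdi->io_pages = 256 (1M optimal
	 * device I/O), req_size = 128 pages: both tests pass, so
	 * max_pages = min(128, 256) = 128 - the window may grow to the
	 * request size but never past the hardware optimum. */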
552 bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
553 struct file_ra_state *ra = ractl->ra;
563 if (!ra->ra_pages || blk_cgroup_congested()) {
564 if (!ractl->file)
577 prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
579 * A start of file, oversized read, or sequential cache miss:
580 * trivial case: (index - prev_index) == 1
581 * unaligned reads: (index - prev_index) == 0
583 if (!index || req_count > max_pages || index - prev_index <= 1UL) {
584 ra->start = index;
585 ra->size = get_init_ra_size(req_count, max_pages);
586 ra->async_size = ra->size > req_count ? ra->size - req_count :
587 ra->size >> 1;
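Worked example of the initial window, with illustrative numbers and max_pages = 32:

	/* a 4-page read maps to get_init_ra_size(4, 32) = 8 pages with
	 * async_size = 8 - 4 = 4; a 16-page read gets the full 32-page
	 * window with async_size = 32 - 16 = 16. */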
596 miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
598 contig_count = index - miss - 1;
609 * it is a strong indication of a long-running stream (or whole-file read)
613 ra->start = index;
614 ra->size = min(contig_count + req_count, max_pages);
615 ra->async_size = 1;
617 ra->order = 0;
618 ractl->_index = ra->start;
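Worked example with illustrative numbers:

	/* a 4-page read at index 100 whose previous miss is at index 90
	 * has contig_count = 100 - 90 - 1 = 9 cached pages before it;
	 * the window becomes min(9 + 4, max_pages) pages at index 100,
	 * with async_size = 1 to re-arm the readahead trigger. */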
627 struct file_ra_state *ra = ractl->ra;
629 pgoff_t expected, start, end, aligned_end, align;
632 if (!ra->ra_pages)
649 * Ramp up sizes, and push forward the readahead window.
651 expected = round_down(ra->start + ra->size - ra->async_size,
654 ra->start += ra->size;
659 ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
667 * readahead size. Ramp it up and use it as the new readahead size.
670 start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
673 if (!start || start - index > max_pages)
676 ra->start = start;
677 ra->size = start - index; /* old async_size */
678 ra->size += req_count;
679 ra->size = get_next_ra_size(ra, max_pages);
681 ra->order += 2;
682 align = 1UL << min(ra->order, ffs(max_pages) - 1);
683 end = ra->start + ra->size;
685 if (aligned_end > ra->start)
686 ra->size -= end - aligned_end;
687 ra->async_size = ra->size;
688 ractl->_index = ra->start;
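Worked example of the alignment step, with illustrative numbers (max_pages = 32, so ffs(max_pages) - 1 = 5):

	/* ra->order bumped from 2 to 4 -> align = 1 << 4 = 16 pages;
	 * ra->start = 128, ra->size = 24 -> end = 152,
	 * aligned_end = round_down(152, 16) = 144 > 128, so the window
	 * shrinks by 152 - 144 = 8 to 16 pages, ending folio-aligned. */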
700 return -EBADF;
703 if (!(file->f_mode & FMODE_READ))
704 return -EBADF;
709 * on this file, then we must return -EINVAL.
711 if (!file->f_mapping)
712 return -EINVAL;
713 if (!file->f_mapping->a_ops)
714 return -EINVAL;
717 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
718 return -EINVAL;
720 return -EINVAL;
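From userspace these checks surface through the readahead(2) syscall; a minimal caller (the path comes from the command line, the 1M length is arbitrary):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* readable fd, else EBADF above */
	if (fd < 0)
		return 1;
	/* populate the first 1M of the page cache; unsupported file
	 * types fail with EINVAL per the checks above */
	if (readahead(fd, 0, 1024 * 1024) < 0)
		perror("readahead");
	close(fd);
	return 0;
}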
738 * readahead_expand - Expand a readahead request
740 * @new_start: The revised start
758 struct address_space *mapping = ractl->mapping;
759 struct file_ra_state *ra = ractl->ra;
767 * Readahead code should have aligned the ractl->_index to
770 VM_BUG_ON(!IS_ALIGNED(ractl->_index, min_nrpages));
773 while (ractl->_index > new_index) {
774 unsigned long index = ractl->_index - 1;
775 struct folio *folio = xa_load(&mapping->i_pages, index);
790 !ractl->_workingset) {
791 ractl->_workingset = true;
792 psi_memstall_enter(&ractl->_pflags);
794 ractl->_nr_pages += min_nrpages;
795 ractl->_index = folio->index;
798 new_len += new_start - readahead_pos(ractl);
802 while (ractl->_nr_pages < new_nr_pages) {
803 unsigned long index = ractl->_index + ractl->_nr_pages;
804 struct folio *folio = xa_load(&mapping->i_pages, index);
819 !ractl->_workingset) {
820 ractl->_workingset = true;
821 psi_memstall_enter(&ractl->_pflags);
823 ractl->_nr_pages += min_nrpages;
825 ra->size += min_nrpages;
826 ra->async_size += min_nrpages;
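A typical readahead_expand() caller is a filesystem whose I/O unit exceeds the request, e.g. rounding out to a compression block; a hypothetical sketch (the myfs name and 64k block size are illustrative):

static void myfs_expand_readahead(struct readahead_control *ractl)
{
	/* grow the request to whole 64k blocks; readahead_expand()
	 * then pins the extra folios as in the loops above */
	loff_t start = round_down(readahead_pos(ractl), SZ_64K);
	size_t len = round_up(readahead_pos(ractl) +
			      readahead_length(ractl), SZ_64K) - start;

	readahead_expand(ractl, start, len);
}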