Lines Matching refs:mapping

142 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)  in file_ra_state_init()  argument
144 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
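The two hits above cover essentially the whole of file_ra_state_init(): the per-file readahead window is seeded from the backing device of mapping->host. A minimal sketch of that body, assuming current mainline mm/readahead.c; the prev_pos reset is recalled from memory rather than taken from the hits above:

    #include <linux/fs.h>
    #include <linux/backing-dev.h>

    void file_ra_state_init(struct file_ra_state *ra,
                            struct address_space *mapping)
    {
            /* Default window size comes from the backing device. */
            ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
            /* No previous read position yet. */
            ra->prev_pos = -1;
    }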
151 const struct address_space_operations *aops = rac->mapping->a_ops; in read_pages()
213 struct address_space *mapping = ractl->mapping; in page_cache_ra_unbounded() local
215 gfp_t gfp_mask = readahead_gfp_mask(mapping); in page_cache_ra_unbounded()
217 unsigned int min_nrpages = mapping_min_folio_nrpages(mapping); in page_cache_ra_unbounded()
231 trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read, in page_cache_ra_unbounded()
233 filemap_invalidate_lock_shared(mapping); in page_cache_ra_unbounded()
234 index = mapping_align_index(mapping, index); in page_cache_ra_unbounded()
257 struct folio *folio = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
276 mapping_min_folio_order(mapping)); in page_cache_ra_unbounded()
280 ret = filemap_add_folio(mapping, folio, index + i, gfp_mask); in page_cache_ra_unbounded()
303 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_unbounded()
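Hits 213 through 303 outline how page_cache_ra_unbounded() leans on mapping at every step: the GFP mask and minimum folio geometry are derived from it, the invalidate lock is held shared across the pass, the start index is aligned to the minimum folio order, already-present folios are detected with xa_load() on mapping->i_pages, and new folios go in through filemap_add_folio(). A simplified sketch of that loop under those assumptions; filemap_alloc_folio() and folio_put() are recalled from the surrounding kernel code rather than from the hits, and the real function batches I/O submission through read_pages() and copes with xarray shadow entries and -EEXIST races, all omitted here:

    gfp_t gfp_mask = readahead_gfp_mask(mapping);
    unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
    unsigned long i;

    filemap_invalidate_lock_shared(mapping);
    index = mapping_align_index(mapping, index);

    for (i = 0; i < nr_to_read; i += min_nrpages) {
            struct folio *folio = xa_load(&mapping->i_pages, index + i);

            if (folio)
                    continue;       /* already cached, skip this slot */

            folio = filemap_alloc_folio(gfp_mask,
                                        mapping_min_folio_order(mapping));
            if (!folio)
                    break;
            if (filemap_add_folio(mapping, folio, index + i, gfp_mask) < 0) {
                    folio_put(folio);
                    break;
            }
    }

    filemap_invalidate_unlock_shared(mapping);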
317 struct inode *inode = ractl->mapping->host; in do_page_cache_ra()
342 struct address_space *mapping = ractl->mapping; in force_page_cache_ra() local
344 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); in force_page_cache_ra()
347 if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead)) in force_page_cache_ra()
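force_page_cache_ra() only needs mapping to locate the backing device and to confirm that the filesystem can read folios at all; hit 347 is the early-out when it cannot. A sketch of that guard, as the hits suggest:

    struct address_space *mapping = ractl->mapping;
    struct backing_dev_info *bdi = inode_to_bdi(mapping->host);

    /* Without a read_folio or readahead op there is no way to bring
     * pages in, so forced readahead has nothing to do.
     * (bdi->io_pages is later used to clamp nr_to_read, not shown.) */
    if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
            return;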
453 err = filemap_add_folio(ractl->mapping, folio, index, gfp); in ra_alloc_folio()
467 struct address_space *mapping = ractl->mapping; in page_cache_ra_order() local
470 unsigned int min_order = mapping_min_folio_order(mapping); in page_cache_ra_order()
471 pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT; in page_cache_ra_order()
475 gfp_t gfp = readahead_gfp_mask(mapping); in page_cache_ra_order()
478 trace_page_cache_ra_order(mapping->host, start, ra); in page_cache_ra_order()
479 if (!mapping_large_folio_support(mapping)) { in page_cache_ra_order()
486 new_order = min(mapping_max_folio_order(mapping), new_order); in page_cache_ra_order()
494 filemap_invalidate_lock_shared(mapping); in page_cache_ra_order()
500 ractl->_index = mapping_align_index(mapping, index); in page_cache_ra_order()
519 filemap_invalidate_unlock_shared(mapping); in page_cache_ra_order()
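In page_cache_ra_order() the hits show mapping steering the large-folio policy: the trace point, the bail-out when the mapping does not support large folios, and the clamping of the requested order to the range the mapping allows, all under the shared invalidate lock. A hedged sketch of just the order-selection part; do_page_cache_ra() as the fallback and the max() clamp are recalled from memory, nr_to_read and lookahead_size stand in for the window the caller computed, and the EOF clamp against i_size_read(mapping->host) and the allocation loop are left out:

    unsigned int min_order = mapping_min_folio_order(mapping);

    if (!mapping_large_folio_support(mapping)) {
            /* Order-0 folios only: use the plain readahead path instead. */
            do_page_cache_ra(ractl, nr_to_read, lookahead_size);
            return;
    }

    /* Keep the requested order inside the mapping's supported range. */
    new_order = min(mapping_max_folio_order(mapping), new_order);
    new_order = max(new_order, min_order);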
542 struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host); in ractl_max_pages()
563 trace_page_cache_sync_ra(ractl->mapping->host, index, ra, req_count); in page_cache_sync_ra()
603 miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages); in page_cache_sync_ra()
648 trace_page_cache_async_ra(ractl->mapping->host, index, ra, req_count); in page_cache_async_ra()
678 start = page_cache_next_miss(ractl->mapping, index + 1, max_pages); in page_cache_async_ra()
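The sync and async entry points mostly reach mapping through ractl->mapping: tracing, probing the page cache around the current index with page_cache_prev_miss() and page_cache_next_miss(), and sizing the window against ractl_max_pages(). The ractl->mapping pointer itself is filled in by the callers; a sketch of the wrapper in include/linux/pagemap.h that builds the readahead_control, recalled from mainline and therefore to be treated as an assumption rather than a quote:

    static inline void page_cache_sync_readahead(struct address_space *mapping,
                    struct file_ra_state *ra, struct file *file,
                    pgoff_t index, unsigned long req_count)
    {
            /* DEFINE_READAHEAD() is what sets ractl->mapping, ->ra,
             * ->file and ->_index for every ractl->mapping hit above. */
            DEFINE_READAHEAD(ractl, file, ra, mapping, index);
            page_cache_sync_ra(&ractl, req_count);
    }

The async wrapper follows the same pattern around page_cache_async_ra(), additionally passing the folio whose readahead flag triggered the call.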
766 struct address_space *mapping = ractl->mapping; in readahead_expand() local
769 gfp_t gfp_mask = readahead_gfp_mask(mapping); in readahead_expand()
770 unsigned long min_nrpages = mapping_min_folio_nrpages(mapping); in readahead_expand()
771 unsigned int min_order = mapping_min_folio_order(mapping); in readahead_expand()
783 struct folio *folio = xa_load(&mapping->i_pages, index); in readahead_expand()
792 index = mapping_align_index(mapping, index); in readahead_expand()
793 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { in readahead_expand()
812 struct folio *folio = xa_load(&mapping->i_pages, index); in readahead_expand()
821 index = mapping_align_index(mapping, index); in readahead_expand()
822 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { in readahead_expand()
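readahead_expand() repeats one idiom twice, once when growing the window downwards (hits 783 to 793) and once when growing it upwards (hits 812 to 822): probe mapping->i_pages with xa_load(), and only if the slot is empty allocate a folio of the mapping's minimum order, align the index, and attempt filemap_add_folio(). A condensed sketch of one side of that expansion; the xa_is_value() check, filemap_alloc_folio() and folio_put() are recalled from the surrounding kernel code, and failure handling is reduced to dropping the folio and giving up:

    struct folio *folio = xa_load(&mapping->i_pages, index);

    if (folio && !xa_is_value(folio))
            return;         /* already cached, cannot expand past it */

    folio = filemap_alloc_folio(gfp_mask, min_order);
    if (!folio)
            return;

    index = mapping_align_index(mapping, index);
    if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
            folio_put(folio);
            return;
    }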