Lines in mm/truncate.c matching the identifier `mapping`. Each entry shows the source line number, the matched line, and the enclosing function; "argument" and "local" mark the entries where the identifier is declared.
26 static void clear_shadow_entries(struct address_space *mapping, in clear_shadow_entries() argument
29 XA_STATE(xas, &mapping->i_pages, start); in clear_shadow_entries()
33 if (shmem_mapping(mapping) || dax_mapping(mapping)) in clear_shadow_entries()
38 spin_lock(&mapping->host->i_lock); in clear_shadow_entries()
48 if (mapping_shrinkable(mapping)) in clear_shadow_entries()
49 inode_lru_list_add(mapping->host); in clear_shadow_entries()
50 spin_unlock(&mapping->host->i_lock); in clear_shadow_entries()
60 static void truncate_folio_batch_exceptionals(struct address_space *mapping, in truncate_folio_batch_exceptionals() argument
63 XA_STATE(xas, &mapping->i_pages, indices[0]); in truncate_folio_batch_exceptionals()
69 if (shmem_mapping(mapping)) in truncate_folio_batch_exceptionals()
79 if (dax_mapping(mapping)) { in truncate_folio_batch_exceptionals()
92 * Delete the mapping so truncate_pagecache() in truncate_folio_batch_exceptionals()
95 dax_delete_mapping_entry(mapping, indices[i]); in truncate_folio_batch_exceptionals()
104 spin_lock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
113 if (mapping_shrinkable(mapping)) in truncate_folio_batch_exceptionals()
114 inode_lru_list_add(mapping->host); in truncate_folio_batch_exceptionals()
115 spin_unlock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
137 const struct address_space_operations *aops = folio->mapping->a_ops; in folio_invalidate()
149 * We need to bail out if page->mapping is no longer equal to the original
150 * mapping. This happens a) when the VM reclaimed the page while we waited on
170 int truncate_inode_folio(struct address_space *mapping, struct folio *folio) in truncate_inode_folio() argument
172 if (folio->mapping != mapping) in truncate_inode_folio()
198 if (ret && !shmem_mapping(folio->mapping)) { in try_folio_split_or_unmap()
236 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
245 if (!mapping_inaccessible(folio->mapping)) in truncate_inode_partial_folio()
253 min_order = mapping_min_folio_order(folio->mapping); in truncate_inode_partial_folio()
279 /* make sure folio2 is large and does not change its mapping */ in truncate_inode_partial_folio()
281 folio2->mapping == folio->mapping) in truncate_inode_partial_folio()
292 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
299 int generic_error_remove_folio(struct address_space *mapping, in generic_error_remove_folio() argument
302 if (!mapping) in generic_error_remove_folio()
308 if (!S_ISREG(mapping->host->i_mode)) in generic_error_remove_folio()
310 return truncate_inode_folio(mapping, folio); in generic_error_remove_folio()
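generic_error_remove_folio() is normally wired up as the ->error_remove_folio handler in a filesystem's address_space_operations so that memory-failure handling can drop a poisoned pagecache folio. A minimal sketch of that wiring, with the filesystem name hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Hypothetical filesystem wiring; a real implementation would also set
 * read/write handlers here.  The error_remove_folio hook is the point
 * of this sketch.
 */
static const struct address_space_operations examplefs_aops = {
	.error_remove_folio	= generic_error_remove_folio,
};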
316 * @mapping: The mapping this folio belongs to.
325 long mapping_evict_folio(struct address_space *mapping, struct folio *folio) in mapping_evict_folio() argument
328 if (!mapping) in mapping_evict_folio()
339 return remove_mapping(mapping, folio); in mapping_evict_folio()
344 * @mapping: mapping to truncate
359 * mapping is large, it is probably the case that the final pages are the most
366 void truncate_inode_pages_range(struct address_space *mapping, in truncate_inode_pages_range() argument
378 if (mapping_empty(mapping)) in truncate_inode_pages_range()
400 while (index < end && find_lock_entries(mapping, &index, end - 1, in truncate_inode_pages_range()
402 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
405 delete_from_page_cache_batch(mapping, &fbatch); in truncate_inode_pages_range()
413 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0); in truncate_inode_pages_range()
427 folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT, in truncate_inode_pages_range()
440 if (!find_get_entries(mapping, &index, end - 1, &fbatch, in truncate_inode_pages_range()
461 truncate_inode_folio(mapping, folio); in truncate_inode_pages_range()
464 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
472 * @mapping: mapping to truncate
476 * mapping->invalidate_lock.
480 * mapping->nrpages can be non-zero when this function returns even after
481 * truncation of the whole mapping.
483 void truncate_inode_pages(struct address_space *mapping, loff_t lstart) in truncate_inode_pages() argument
485 truncate_inode_pages_range(mapping, lstart, (loff_t)-1); in truncate_inode_pages()
491 * @mapping: mapping to truncate
498 void truncate_inode_pages_final(struct address_space *mapping) in truncate_inode_pages_final() argument
507 mapping_set_exiting(mapping); in truncate_inode_pages_final()
509 if (!mapping_empty(mapping)) { in truncate_inode_pages_final()
516 xa_lock_irq(&mapping->i_pages); in truncate_inode_pages_final()
517 xa_unlock_irq(&mapping->i_pages); in truncate_inode_pages_final()
520 truncate_inode_pages(mapping, 0); in truncate_inode_pages_final()
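truncate_inode_pages_final() is intended to be called from a filesystem's ->evict_inode once no new pagecache can be added; it sets the exiting flag and then empties the mapping. A minimal sketch of that pattern (examplefs_evict_inode is a hypothetical callback):

#include <linux/fs.h>
#include <linux/mm.h>

static void examplefs_evict_inode(struct inode *inode)
{
	/* Drop every remaining pagecache folio and shadow entry. */
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
}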
526 * @mapping: the address_space which holds the folios to invalidate
534 unsigned long mapping_try_invalidate(struct address_space *mapping, in mapping_try_invalidate() argument
545 while (find_lock_entries(mapping, &index, end, &fbatch, indices)) { in mapping_try_invalidate()
560 ret = mapping_evict_folio(mapping, folio); in mapping_try_invalidate()
576 clear_shadow_entries(mapping, indices[0], indices[nr-1]); in mapping_try_invalidate()
587 * @mapping: the address_space which holds the cache to invalidate
599 unsigned long invalidate_mapping_pages(struct address_space *mapping, in invalidate_mapping_pages() argument
602 return mapping_try_invalidate(mapping, start, end, NULL); in invalidate_mapping_pages()
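invalidate_mapping_pages() is the opportunistic variant: it only drops clean, unlocked, unmapped folios and silently skips the rest, so callers can use it as a best-effort cache trim. A hedged sketch that sheds an inode's entire clean pagecache (the wrapper function is an assumption):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Best-effort cache shedding for one inode; returns folios released. */
static unsigned long examplefs_shed_cache(struct inode *inode)
{
	/* start == 0, end == -1 covers the whole mapping. */
	return invalidate_mapping_pages(inode->i_mapping, 0, -1);
}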
606 static int folio_launder(struct address_space *mapping, struct folio *folio) in folio_launder() argument
610 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) in folio_launder()
612 return mapping->a_ops->launder_folio(folio); in folio_launder()
622 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, in folio_unmap_invalidate() argument
633 ret = folio_launder(mapping, folio); in folio_unmap_invalidate()
636 if (folio->mapping != mapping) in folio_unmap_invalidate()
641 spin_lock(&mapping->host->i_lock); in folio_unmap_invalidate()
642 xa_lock_irq(&mapping->i_pages); in folio_unmap_invalidate()
648 xa_unlock_irq(&mapping->i_pages); in folio_unmap_invalidate()
649 if (mapping_shrinkable(mapping)) in folio_unmap_invalidate()
650 inode_lru_list_add(mapping->host); in folio_unmap_invalidate()
651 spin_unlock(&mapping->host->i_lock); in folio_unmap_invalidate()
653 filemap_free_folio(mapping, folio); in folio_unmap_invalidate()
656 xa_unlock_irq(&mapping->i_pages); in folio_unmap_invalidate()
657 spin_unlock(&mapping->host->i_lock); in folio_unmap_invalidate()
663 * @mapping: the address_space
672 int invalidate_inode_pages2_range(struct address_space *mapping, in invalidate_inode_pages2_range() argument
683 if (mapping_empty(mapping)) in invalidate_inode_pages2_range()
688 while (find_get_entries(mapping, &index, end, &fbatch, indices)) { in invalidate_inode_pages2_range()
699 if (dax_mapping(mapping) && in invalidate_inode_pages2_range()
700 !dax_invalidate_mapping_entry_sync(mapping, indices[i])) in invalidate_inode_pages2_range()
710 unmap_mapping_pages(mapping, indices[i], in invalidate_inode_pages2_range()
716 if (unlikely(folio->mapping != mapping)) { in invalidate_inode_pages2_range()
722 ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL); in invalidate_inode_pages2_range()
729 clear_shadow_entries(mapping, indices[0], indices[nr-1]); in invalidate_inode_pages2_range()
742 if (dax_mapping(mapping)) { in invalidate_inode_pages2_range()
743 unmap_mapping_pages(mapping, start, end - start + 1, false); in invalidate_inode_pages2_range()
751 * @mapping: the address_space
758 int invalidate_inode_pages2(struct address_space *mapping) in invalidate_inode_pages2() argument
760 return invalidate_inode_pages2_range(mapping, 0, -1); in invalidate_inode_pages2()
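Unlike invalidate_mapping_pages(), invalidate_inode_pages2_range() insists on removing the folios in the range, unmapping them first and returning -EBUSY if something cannot be invalidated; direct I/O write paths use it to keep the pagecache coherent with data written behind its back. A hedged sketch of that pattern (the wrapper and its offsets are assumptions):

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * After writing [pos, pos + len) to the backing store directly, force
 * out any pagecache folios that still cover that byte range.
 */
static int examplefs_dio_invalidate(struct address_space *mapping,
				    loff_t pos, size_t len)
{
	return invalidate_inode_pages2_range(mapping,
			pos >> PAGE_SHIFT,
			(pos + len - 1) >> PAGE_SHIFT);
}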
781 struct address_space *mapping = inode->i_mapping; in truncate_pagecache() local
793 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
794 truncate_inode_pages(mapping, newsize); in truncate_pagecache()
795 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
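truncate_pagecache() is the pagecache half of a size change: the caller publishes the new i_size first, then calls it so the unmap/truncate/unmap sequence above cannot race with concurrent faults reinstating stale pages. This is essentially what truncate_setsize() wraps; a hedged sketch of a shrinking path (the helper name and the on-disk step are assumptions):

#include <linux/fs.h>
#include <linux/mm.h>

/* Shrink an inode: publish the new size, then drop pagecache beyond it. */
static void examplefs_shrink(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);
	/* ...the filesystem would now free on-disk blocks past newsize... */
}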
902 struct address_space *mapping = inode->i_mapping; in truncate_pagecache_range() local
919 unmap_mapping_range(mapping, unmap_start, in truncate_pagecache_range()
921 truncate_inode_pages_range(mapping, lstart, lend); in truncate_pagecache_range()
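truncate_pagecache_range() takes byte offsets with an inclusive end and is the usual helper behind hole punching: the filesystem unmaps and drops the affected pagecache before releasing the underlying blocks. A hedged sketch (the punch-hole wrapper itself is an assumption):

#include <linux/fs.h>
#include <linux/mm.h>

/* Drop cached data for the punched byte range [offset, offset + len). */
static void examplefs_punch_hole_cache(struct inode *inode,
				       loff_t offset, loff_t len)
{
	truncate_pagecache_range(inode, offset, offset + len - 1);
	/* ...the filesystem then frees the on-disk extents in the hole... */
}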