/linux/fs/hugetlbfs/
  inode.c
    397  struct rb_root_cached *root = &mapping->i_mmap;  in hugetlb_unmap_file_folio()
    641  if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))  in hugetlb_vmtruncate()
    642          hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,  in hugetlb_vmtruncate()
    702  if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))  in hugetlbfs_punch_hole()
    703          hugetlb_vmdelete_list(&mapping->i_mmap,  in hugetlbfs_punch_hole()
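Every hugetlbfs hit above follows the same guard: take the i_mmap rwsem for writing, then walk the interval tree only if it actually holds VMAs (the same RB_EMPTY_ROOT() check reappears in mm/memory.c further down). A minimal sketch of that pattern, assuming a generic zap_page_range_single() as a stand-in for the hugetlb-specific hugetlb_vmdelete_list(), and ignoring the alignment and reservation handling the real code must do:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/rbtree.h>

    /*
     * Illustrative only: unmap every user mapping of the file from page
     * offset 'start' to the end of the file.  The interval tree bounds
     * are inclusive, so ULONG_MAX here means "to end of file".
     */
    static void example_unmap_from_offset(struct address_space *mapping,
                                          pgoff_t start)
    {
            struct vm_area_struct *vma;

            i_mmap_lock_write(mapping);

            /* Fast path: no VMA maps this file, nothing to unmap. */
            if (RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                    goto out;

            vma_interval_tree_foreach(vma, &mapping->i_mmap, start, ULONG_MAX) {
                    unsigned long addr = vma->vm_start;

                    /* If the cut point lies inside this VMA, start there. */
                    if (vma->vm_pgoff < start)
                            addr += (start - vma->vm_pgoff) << PAGE_SHIFT;

                    /* Stand-in for hugetlb_vmdelete_list()'s per-VMA unmap. */
                    zap_page_range_single(vma, addr, vma->vm_end - addr, NULL);
            }
    out:
            i_mmap_unlock_write(mapping);
    }

For a hole punch the walk is bounded to the punched range rather than running to the end of the file; the guard and the write-locked walk are otherwise the same.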
/linux/arch/arm/mm/
  fault-armv.c
    143  vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {  in make_coherent()
  flush.c
    254  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {  in __flush_dcache_aliases()
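Both ARM walkers resolve every user-space alias of a page: the last two arguments of vma_interval_tree_foreach() are inclusive page offsets, so (pgoff, pgoff) matches exactly the VMAs covering that single page and (pgoff, pgoff_end) a wider span. A minimal read-locked sketch of the same walk; the flush hook is a hypothetical placeholder, and i_mmap_lock_read() is the generic helper, not necessarily the locking the ARM code itself uses:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Hypothetical per-alias cache-maintenance hook. */
    static void example_flush_alias(struct vm_area_struct *vma, unsigned long addr)
    {
            /* arch-specific D-cache/I-cache maintenance would go here */
    }

    static void example_flush_aliases(struct address_space *mapping, pgoff_t pgoff)
    {
            struct vm_area_struct *vma;

            i_mmap_lock_read(mapping);
            /* Inclusive bounds [pgoff, pgoff]: only VMAs mapping this page. */
            vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                    /* Translate the file offset to this VMA's user address. */
                    unsigned long addr = vma->vm_start +
                            ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

                    example_flush_alias(vma, addr);
            }
            i_mmap_unlock_read(mapping);
    }

The nios2 and parisc hits further down are the same pattern with (pgoff, pgoff + nr - 1) to cover a multi-page folio.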
/linux/mm/
  nommu.c
    546   vma_interval_tree_insert(vma, &mapping->i_mmap);  in setup_vma_to_mm()
    562   vma_interval_tree_remove(vma, &mapping->i_mmap);  in cleanup_vma_from_mm()
    1730  vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {  in nommu_shrink_inode_mappings()
    1746  vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {  in nommu_shrink_inode_mappings()
  vma.c
    187   vma_interval_tree_insert(vma, &mapping->i_mmap);  in __vma_link_file()
    201   vma_interval_tree_remove(vma, &mapping->i_mmap);  in __remove_shared_vm_struct()
    272   vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);  in vma_prepare()
    275           &vp->mapping->i_mmap);  in vma_prepare()
    294           &vp->mapping->i_mmap);  in vma_complete()
    295   vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);  in vma_complete()
  memory-failure.c
    665   vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,  in collect_procs_file()
    717   vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {  in collect_procs_fsdax()
  khugepaged.c
    1721  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {  in retract_page_tables()
    2155  vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {  in collapse_file()
  memory.c
    3898  if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))  in unmap_mapping_folio()
    3899          unmap_mapping_range_tree(&mapping->i_mmap, first_index,  in unmap_mapping_folio()
    3928  if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))  in unmap_mapping_pages()
    3929          unmap_mapping_range_tree(&mapping->i_mmap, first_index,  in unmap_mapping_pages()
  hugetlb.c
    5796  vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {  in unmap_ref_private()
    7242  vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {  in huge_pmd_share()
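The nommu.c and vma.c hits show the write side of the tree: a file-backed VMA is inserted into mapping->i_mmap when it is linked (setup_vma_to_mm(), __vma_link_file()) and removed when it is unlinked or about to be modified (cleanup_vma_from_mm(), __remove_shared_vm_struct(), vma_prepare()/vma_complete()). A minimal sketch of that link/unlink pairing; the function names here are illustrative, not the kernel's:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /*
     * The tree is keyed by the VMA's file page-offset range, so later
     * pgoff lookups (truncate, rmap, cache flushing, memory failure)
     * find every mapping of that range.  All modifications of the tree
     * are serialised by the i_mmap rwsem held for write.
     */
    static void example_link_file_vma(struct vm_area_struct *vma)
    {
            struct address_space *mapping;

            if (!vma->vm_file)
                    return;         /* anonymous VMAs are tracked via anon_vma */

            mapping = vma->vm_file->f_mapping;
            i_mmap_lock_write(mapping);
            vma_interval_tree_insert(vma, &mapping->i_mmap);
            i_mmap_unlock_write(mapping);
    }

    static void example_unlink_file_vma(struct vm_area_struct *vma)
    {
            struct address_space *mapping;

            if (!vma->vm_file)
                    return;

            mapping = vma->vm_file->f_mapping;
            i_mmap_lock_write(mapping);
            vma_interval_tree_remove(vma, &mapping->i_mmap);
            i_mmap_unlock_write(mapping);
    }

vma_prepare() and vma_complete() remove and re-insert the VMA around a split or merge so the tree's keys stay consistent with vm_pgoff, and dup_mmap() in kernel/fork.c (last hit below) inserts the child's copy into the same tree at fork time.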
/linux/arch/nios2/mm/
  cacheflush.c
    85   vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {  in flush_aliases()
/linux/Documentation/mm/
  process_addrs.rst
    249  mapping is file-backed, to place the VMA i_mmap write.
    251  :c:member:`!struct address_space->i_mmap`
    254  interval tree if the VMA is file-backed. i_mmap write.
    275  then it can be in both the :c:type:`!anon_vma` and :c:type:`!i_mmap`
    668  address_space->i_mmap` interval trees) can have its page tables torn down.
    688  :c:func:`!retract_page_tables`, which is performed under the i_mmap
/linux/arch/parisc/kernel/
  cache.c
    506  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {  in flush_dcache_folio()
/linux/include/linux/
  fs.h
    512  struct rb_root_cached i_mmap;  member
    587  return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);  in mapping_mapped()
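The two fs.h hits are the definition of the field itself (an rb_root_cached interval tree inside struct address_space) and mapping_mapped(), which is simply the unlocked RB_EMPTY_ROOT() test on it. A small sketch of how the two fit together, assuming the usual i_mmap_lock_read()/i_mmap_lock_write() helpers from the same header guard any real walk of the tree:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/rbtree.h>

    /*
     * mapping_mapped() is only a racy hint: it reads i_mmap.rb_root
     * without the i_mmap rwsem.  Any walk of the tree itself must hold
     * the rwsem (read for lookups, write for insert/remove).
     */
    static bool example_file_is_mmapped(struct address_space *mapping)
    {
            bool mapped;

            if (!mapping_mapped(mapping))   /* cheap unlocked fast path */
                    return false;

            i_mmap_lock_read(mapping);
            mapped = !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
            i_mmap_unlock_read(mapping);

            return mapped;
    }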
/linux/fs/
  dax.c
    993  vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {  in dax_writeback_one()
/linux/kernel/events/
  uprobes.c
    1186  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {  in build_map_info()
/linux/kernel/
  fork.c
    742           &mapping->i_mmap);  in dup_mmap()