Lines matching refs:vma (references to the identifier vma)

149 static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link()  argument
153 avc->vma = vma; in anon_vma_chain_link()
155 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
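
The three fragments above are the body of the small helper that ties a VMA to an anon_vma through an anon_vma_chain entry. A minimal reconstruction for orientation; the avc->anon_vma assignment and the interval-tree insertion are not quoted in the listing and are assumptions based on the helper's usual shape:

static void anon_vma_chain_link(struct vm_area_struct *vma,
                                struct anon_vma_chain *avc,
                                struct anon_vma *anon_vma)
{
        /* The chain entry points at both ends of the relationship. */
        avc->vma = vma;
        avc->anon_vma = anon_vma;
        /* Hang it on the VMA's private list of chain entries... */
        list_add(&avc->same_vma, &vma->anon_vma_chain);
        /* ...and into the anon_vma's interval tree of mapping VMAs. */
        anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
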
185 int __anon_vma_prepare(struct vm_area_struct *vma) in __anon_vma_prepare() argument
187 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
198 anon_vma = find_mergeable_anon_vma(vma); in __anon_vma_prepare()
211 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
212 vma->anon_vma = anon_vma; in __anon_vma_prepare()
213 anon_vma_chain_link(vma, avc, anon_vma); in __anon_vma_prepare()
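
Taken together, the __anon_vma_prepare() fragments show the lazy allocation of a VMA's anon_vma on first anonymous fault. A condensed sketch of that flow; the allocation helpers, locking, and race handling shown here are assumptions not present in the listing:

int __anon_vma_prepare(struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        struct anon_vma *anon_vma, *allocated = NULL;
        struct anon_vma_chain *avc;

        avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
                return -ENOMEM;

        /* Prefer sharing a mergeable neighbour's anon_vma; allocate otherwise. */
        anon_vma = find_mergeable_anon_vma(vma);
        if (!anon_vma) {
                anon_vma = anon_vma_alloc();
                if (!anon_vma) {
                        anon_vma_chain_free(avc);
                        return -ENOMEM;
                }
                allocated = anon_vma;
        }

        anon_vma_lock_write(anon_vma);
        spin_lock(&mm->page_table_lock);        /* serialise against other threads */
        if (likely(!vma->anon_vma)) {           /* nobody beat us to it */
                vma->anon_vma = anon_vma;
                anon_vma_chain_link(vma, avc, anon_vma);
                allocated = NULL;               /* both now owned by the VMA */
                avc = NULL;
        }
        spin_unlock(&mm->page_table_lock);
        anon_vma_unlock_write(anon_vma);

        if (unlikely(allocated))                /* lost the race: drop the spares */
                put_anon_vma(allocated);
        if (unlikely(avc))
                anon_vma_chain_free(avc);
        return 0;
}
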
333 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
344 vma->anon_vma = NULL; in anon_vma_fork()
350 error = anon_vma_clone(vma, pvma); in anon_vma_fork()
355 if (vma->anon_vma) in anon_vma_fork()
380 vma->anon_vma = anon_vma; in anon_vma_fork()
382 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_fork()
391 unlink_anon_vmas(vma); in anon_vma_fork()
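
The anon_vma_fork() fragments cover a child VMA at fork time: clone the parent's chain first, then attach a fresh anon_vma of its own for pages COWed after the fork. A condensed sketch; the allocation, root/parent wiring, and refcounting are assumptions filled in around the quoted lines:

int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
        struct anon_vma_chain *avc;
        struct anon_vma *anon_vma;
        int error;

        /* Nothing to do if the parent never had anonymous pages here. */
        if (!pvma->anon_vma)
                return 0;

        /* Drop any inherited anon_vma; it is re-established below. */
        vma->anon_vma = NULL;

        /*
         * Link the child VMA into the parent's anon_vmas so rmap can
         * still find the pages that were not COWed.
         */
        error = anon_vma_clone(vma, pvma);
        if (error)
                return error;

        /* anon_vma_clone() may have reused an existing anon_vma. */
        if (vma->anon_vma)
                return 0;

        /* Otherwise give the child its own anon_vma for future COWed pages. */
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
                goto out_error;
        avc = anon_vma_chain_alloc(GFP_KERNEL);
        if (!avc)
                goto out_error_free_anon_vma;

        anon_vma->root = pvma->anon_vma->root;  /* share the parent's lock root */
        anon_vma->parent = pvma->anon_vma;
        get_anon_vma(anon_vma->root);

        vma->anon_vma = anon_vma;
        anon_vma_lock_write(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
        anon_vma_unlock_write(anon_vma);

        return 0;

out_error_free_anon_vma:
        put_anon_vma(anon_vma);
out_error:
        unlink_anon_vmas(vma);          /* undo the partial clone */
        return -ENOMEM;
}
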
395 void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
404 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
422 if (vma->anon_vma) { in unlink_anon_vmas()
423 vma->anon_vma->num_active_vmas--; in unlink_anon_vmas()
429 vma->anon_vma = NULL; in unlink_anon_vmas()
438 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
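
unlink_anon_vmas() walks vma->anon_vma_chain twice, which is why the same list_for_each_entry_safe() loop appears at two different source lines above. A condensed sketch of the two passes; the interval-tree removal, root locking, and deferred put are assumptions drawn from the function's usual structure:

void unlink_anon_vmas(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc, *next;
        struct anon_vma *root = NULL;

        /*
         * Pass 1: take the tree's root lock once, unhook every chain entry
         * from its anon_vma's interval tree, and free entries whose anon_vma
         * still has other users.  Now-empty anon_vmas stay on the list,
         * because dropping them needs the lock we are holding.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;

                root = lock_anon_vma_root(root, anon_vma);
                anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

                if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root))
                        continue;               /* defer the final put */

                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
        if (vma->anon_vma) {
                vma->anon_vma->num_active_vmas--;
                vma->anon_vma = NULL;
        }
        unlock_anon_vma_root(root);

        /* Pass 2: drop the deferred, now-empty anon_vmas outside the lock. */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                put_anon_vma(avc->anon_vma);
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
}
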
764 const struct page *page, const struct vm_area_struct *vma) in page_address_in_vma() argument
772 if (!vma->anon_vma || !anon_vma || in page_address_in_vma()
773 vma->anon_vma->root != anon_vma->root) in page_address_in_vma()
775 } else if (!vma->vm_file) { in page_address_in_vma()
777 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
782 return vma_address(vma, page_pgoff(folio, page), 1); in page_address_in_vma()
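
page_address_in_vma() answers "at what user address does this VMA map this page, if at all". A sketch of the checks visible above, assuming (not shown in the listing) that the folio is passed alongside the page and that a failed check returns -EFAULT:

unsigned long page_address_in_vma(const struct folio *folio,
                const struct page *page, const struct vm_area_struct *vma)
{
        if (folio_test_anon(folio)) {
                struct anon_vma *anon_vma = folio_anon_vma(folio);

                /* Anonymous: the VMA must belong to the folio's anon_vma tree. */
                if (!vma->anon_vma || !anon_vma ||
                    vma->anon_vma->root != anon_vma->root)
                        return -EFAULT;
        } else if (!vma->vm_file) {
                return -EFAULT;         /* file-backed folio, anonymous VMA */
        } else if (vma->vm_file->f_mapping != folio->mapping) {
                return -EFAULT;         /* VMA maps a different file */
        }

        /* Translate the page's offset in its mapping into a user address. */
        return vma_address(vma, page_pgoff(folio, page), 1);
}
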
825 struct vm_area_struct *vma, unsigned long address, void *arg) in folio_referenced_one() argument
828 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in folio_referenced_one()
834 if (vma->vm_flags & VM_LOCKED) { in folio_referenced_one()
853 mlock_vma_folio(folio, vma); in folio_referenced_one()
864 if ((!atomic_read(&vma->vm_mm->mm_users) || in folio_referenced_one()
865 check_stable_address_space(vma->vm_mm)) && in folio_referenced_one()
877 if (ptep_clear_flush_young_notify(vma, address, in folio_referenced_one()
881 if (pmdp_clear_flush_young_notify(vma, address, in folio_referenced_one()
899 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; in folio_referenced_one()
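
folio_referenced_one() is an rmap_one callback: for each VMA mapping the folio it test-and-clears the accessed bit on every mapping PTE/PMD, skipping (and re-mlocking) VM_LOCKED VMAs. A simplified sketch; the pra argument layout is assumed, and the early-exit checks around mm_users and a stable address space seen above are omitted:

static bool folio_referenced_one(struct folio *folio,
                struct vm_area_struct *vma, unsigned long address, void *arg)
{
        struct folio_referenced_arg *pra = arg;        /* assumed layout */
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
        int referenced = 0;

        while (page_vma_mapped_walk(&pvmw)) {
                address = pvmw.address;

                if (vma->vm_flags & VM_LOCKED) {
                        /* Keep mlocked folios away from reclaim. */
                        mlock_vma_folio(folio, vma);
                        page_vma_mapped_walk_done(&pvmw);
                        pra->vm_flags |= VM_LOCKED;
                        return false;           /* abort the rmap walk */
                }

                /* Test-and-clear the accessed bit at PTE or PMD granularity. */
                if (pvmw.pte) {
                        if (ptep_clear_flush_young_notify(vma, address, pvmw.pte))
                                referenced++;
                } else if (pvmw.pmd) {
                        if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd))
                                referenced++;
                }
                pra->mapcount--;
        }

        if (referenced) {
                pra->referenced++;
                pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
        }

        return pra->mapcount != 0;      /* keep walking while mappings remain */
}
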
908 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) in invalid_folio_referenced_vma() argument
919 if (!vma_has_recency(vma)) in invalid_folio_referenced_vma()
926 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) in invalid_folio_referenced_vma()
985 struct vm_area_struct *vma = pvmw->vma; in page_vma_mkclean_one() local
994 vma->vm_mm, address, vma_address_end(pvmw)); in page_vma_mkclean_one()
1016 flush_cache_page(vma, address, pte_pfn(entry)); in page_vma_mkclean_one()
1017 entry = ptep_clear_flush(vma, address, pte); in page_vma_mkclean_one()
1020 set_pte_at(vma->vm_mm, address, pte, entry); in page_vma_mkclean_one()
1037 flush_cache_range(vma, address, in page_vma_mkclean_one()
1039 entry = pmdp_invalidate(vma, address, pmd); in page_vma_mkclean_one()
1042 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_vma_mkclean_one()
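
page_vma_mkclean_one() write-protects and cleans every dirty or writable entry mapping the folio range, so the next write re-faults and re-dirties the mapping. A condensed sketch of the PTE and PMD cases from the fragments above; the MMU-notifier bracketing set up around the walk (visible in the mmu_notifier_range_init() line) is omitted, and the accessor names are assumptions:

static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
        struct vm_area_struct *vma = pvmw->vma;
        int cleaned = 0;

        while (page_vma_mapped_walk(pvmw)) {
                unsigned long address = pvmw->address;

                if (pvmw->pte) {
                        pte_t entry = ptep_get(pvmw->pte);

                        if (!pte_dirty(entry) && !pte_write(entry))
                                continue;

                        /* Clear + flush atomically, rewrite clean and read-only. */
                        flush_cache_page(vma, address, pte_pfn(entry));
                        entry = ptep_clear_flush(vma, address, pvmw->pte);
                        entry = pte_wrprotect(entry);
                        entry = pte_mkclean(entry);
                        set_pte_at(vma->vm_mm, address, pvmw->pte, entry);
                        cleaned++;
                } else {
                        /* Same idea one level up for a PMD-mapped THP. */
                        pmd_t entry = pmdp_get(pvmw->pmd);

                        if (!pmd_dirty(entry) && !pmd_write(entry))
                                continue;

                        flush_cache_range(vma, address,
                                          address + HPAGE_PMD_SIZE);
                        entry = pmdp_invalidate(vma, address, pvmw->pmd);
                        entry = pmd_wrprotect(entry);
                        entry = pmd_mkclean(entry);
                        set_pmd_at(vma->vm_mm, address, pvmw->pmd, entry);
                        cleaned++;
                }
        }

        return cleaned;
}
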
1059 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, in page_mkclean_one() argument
1062 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); in page_mkclean_one()
1070 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) in invalid_mkclean_vma() argument
1072 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1111 struct vm_area_struct *vma, unsigned long address, void *arg) in mapping_wrprotect_range_one() argument
1118 .vma = vma, in mapping_wrprotect_range_one()
1190 struct vm_area_struct *vma) in pfn_mkclean_range() argument
1196 .vma = vma, in pfn_mkclean_range()
1200 if (invalid_mkclean_vma(vma, NULL)) in pfn_mkclean_range()
1203 pvmw.address = vma_address(vma, pgoff, nr_pages); in pfn_mkclean_range()
1204 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); in pfn_mkclean_range()
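
pfn_mkclean_range() is the entry point for write-protecting a raw PFN range and feeds a page_vma_mapped_walk into the same cleaning helper. A short sketch; the pvmw initialiser fields other than .vma are assumptions:

int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
                      struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .pfn            = pfn,
                .nr_pages       = nr_pages,
                .pgoff          = pgoff,
                .vma            = vma,
                .flags          = PVMW_SYNC,
        };

        /* Only shared, writable file mappings are worth cleaning. */
        if (invalid_mkclean_vma(vma, NULL))
                return 0;

        pvmw.address = vma_address(vma, pgoff, nr_pages);
        VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

        return page_vma_mkclean_one(&pvmw);
}
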
1232 struct page *page, int nr_pages, struct vm_area_struct *vma, in __folio_add_rmap() argument
1249 nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma); in __folio_add_rmap()
1266 folio_add_large_mapcount(folio, orig_nr_pages, vma); in __folio_add_rmap()
1274 nr = folio_inc_return_large_mapcount(folio, vma); in __folio_add_rmap()
1302 folio_inc_large_mapcount(folio, vma); in __folio_add_rmap()
1319 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) in folio_move_anon_rmap() argument
1321 void *anon_vma = vma->anon_vma; in folio_move_anon_rmap()
1324 VM_BUG_ON_VMA(!anon_vma, vma); in folio_move_anon_rmap()
1342 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, in __folio_set_anon() argument
1345 struct anon_vma *anon_vma = vma->anon_vma; in __folio_set_anon()
1364 folio->index = linear_page_index(vma, address); in __folio_set_anon()
1375 const struct page *page, struct vm_area_struct *vma, in __page_check_anon_rmap() argument
1389 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1391 VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1396 struct page *page, int nr_pages, struct vm_area_struct *vma, in __folio_add_anon_rmap() argument
1403 __folio_add_rmap(folio, page, nr_pages, vma, level); in __folio_add_anon_rmap()
1406 __page_check_anon_rmap(folio, page, vma, address); in __folio_add_anon_rmap()
1455 mlock_vma_folio(folio, vma); in __folio_add_anon_rmap()
1475 int nr_pages, struct vm_area_struct *vma, unsigned long address, in folio_add_anon_rmap_ptes() argument
1478 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, in folio_add_anon_rmap_ptes()
1496 struct vm_area_struct *vma, unsigned long address, rmap_t flags) in folio_add_anon_rmap_pmd() argument
1499 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, in folio_add_anon_rmap_pmd()
1521 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in folio_add_new_anon_rmap() argument
1534 if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) in folio_add_new_anon_rmap()
1536 __folio_set_anon(folio, vma, address, exclusive); in folio_add_new_anon_rmap()
1557 folio_set_large_mapcount(folio, nr, vma); in folio_add_new_anon_rmap()
1564 folio_set_large_mapcount(folio, 1, vma); in folio_add_new_anon_rmap()
1572 VM_WARN_ON_ONCE(address < vma->vm_start || in folio_add_new_anon_rmap()
1573 address + (nr << PAGE_SHIFT) > vma->vm_end); in folio_add_new_anon_rmap()
1580 struct page *page, int nr_pages, struct vm_area_struct *vma, in __folio_add_file_rmap() argument
1585 __folio_add_rmap(folio, page, nr_pages, vma, level); in __folio_add_file_rmap()
1594 mlock_vma_folio(folio, vma); in __folio_add_file_rmap()
1609 int nr_pages, struct vm_area_struct *vma) in folio_add_file_rmap_ptes() argument
1611 __folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); in folio_add_file_rmap_ptes()
1625 struct vm_area_struct *vma) in folio_add_file_rmap_pmd() argument
1628 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); in folio_add_file_rmap_pmd()
1645 struct vm_area_struct *vma) in folio_add_file_rmap_pud() argument
1649 __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); in folio_add_file_rmap_pud()
1656 struct page *page, int nr_pages, struct vm_area_struct *vma, in __folio_remove_rmap() argument
1673 nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); in __folio_remove_rmap()
1685 folio_sub_large_mapcount(folio, nr_pages, vma); in __folio_remove_rmap()
1702 nr = folio_dec_return_large_mapcount(folio, vma); in __folio_remove_rmap()
1714 folio_dec_large_mapcount(folio, vma); in __folio_remove_rmap()
1762 munlock_vma_folio(folio, vma); in __folio_remove_rmap()
1777 int nr_pages, struct vm_area_struct *vma) in folio_remove_rmap_ptes() argument
1779 __folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); in folio_remove_rmap_ptes()
1793 struct vm_area_struct *vma) in folio_remove_rmap_pmd() argument
1796 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); in folio_remove_rmap_pmd()
1813 struct vm_area_struct *vma) in folio_remove_rmap_pud() argument
1817 __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); in folio_remove_rmap_pud()
1828 struct vm_area_struct *vma = pvmw->vma; in folio_unmap_pte_batch() local
1837 end_addr = pmd_addr_end(addr, vma->vm_end); in folio_unmap_pte_batch()
1852 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, in try_to_unmap_one() argument
1855 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1856 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_unmap_one()
1885 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_unmap_one()
1892 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_unmap_one()
1896 hsz = huge_page_size(hstate_vma(vma)); in try_to_unmap_one()
1905 (vma->vm_flags & VM_LOCKED)) { in try_to_unmap_one()
1932 mlock_vma_folio(folio, vma); in try_to_unmap_one()
1938 if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio)) in try_to_unmap_one()
1953 split_huge_pmd_locked(vma, pvmw.address, in try_to_unmap_one()
1998 flush_cache_range(vma, range.start, range.end); in try_to_unmap_one()
2012 if (!hugetlb_vma_trylock_write(vma)) in try_to_unmap_one()
2014 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { in try_to_unmap_one()
2015 hugetlb_vma_unlock_write(vma); in try_to_unmap_one()
2016 flush_tlb_range(vma, in try_to_unmap_one()
2030 hugetlb_vma_unlock_write(vma); in try_to_unmap_one()
2032 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); in try_to_unmap_one()
2038 flush_cache_range(vma, address, end_addr); in try_to_unmap_one()
2053 flush_tlb_range(vma, address, end_addr); in try_to_unmap_one()
2065 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); in try_to_unmap_one()
2081 !userfaultfd_armed(vma)) { in try_to_unmap_one()
2126 if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { in try_to_unmap_one()
2161 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
2215 folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); in try_to_unmap_one()
2217 if (vma->vm_flags & VM_LOCKED) in try_to_unmap_one()
2240 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) in invalid_migration_vma() argument
2242 return vma_is_temporary_stack(vma); in invalid_migration_vma()
2282 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, in try_to_migrate_one() argument
2285 struct mm_struct *mm = vma->vm_mm; in try_to_migrate_one()
2286 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); in try_to_migrate_one()
2313 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_migrate_one()
2320 adjust_range_if_pmd_sharing_possible(vma, &range.start, in try_to_migrate_one()
2324 hsz = huge_page_size(hstate_vma(vma)); in try_to_migrate_one()
2335 split_huge_pmd_locked(vma, pvmw.address, in try_to_migrate_one()
2394 flush_cache_range(vma, range.start, range.end); in try_to_migrate_one()
2408 if (!hugetlb_vma_trylock_write(vma)) { in try_to_migrate_one()
2413 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { in try_to_migrate_one()
2414 hugetlb_vma_unlock_write(vma); in try_to_migrate_one()
2415 flush_tlb_range(vma, in try_to_migrate_one()
2431 hugetlb_vma_unlock_write(vma); in try_to_migrate_one()
2434 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); in try_to_migrate_one()
2439 flush_cache_page(vma, address, pfn); in try_to_migrate_one()
2454 pteval = ptep_clear_flush(vma, address, pvmw.pte); in try_to_migrate_one()
2486 !userfaultfd_armed(vma)) { in try_to_migrate_one()
2507 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_migrate_one()
2583 folio_remove_rmap_pte(folio, subpage, vma); in try_to_migrate_one()
2584 if (vma->vm_flags & VM_LOCKED) in try_to_migrate_one()
2684 struct vm_area_struct *vma; in make_device_exclusive() local
2706 &vma); in make_device_exclusive()
2737 fw_folio = folio_walk_start(&fw, vma, addr, 0); in make_device_exclusive()
2741 folio_walk_end(&fw, vma); in make_device_exclusive()
2749 flush_cache_page(vma, addr, page_to_pfn(page)); in make_device_exclusive()
2750 fw.pte = ptep_clear_flush(vma, addr, fw.ptep); in make_device_exclusive()
2768 folio_walk_end(&fw, vma); in make_device_exclusive()
2854 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
2855 unsigned long address = vma_address(vma, pgoff_start, in rmap_walk_anon()
2858 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_anon()
2861 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
2864 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2899 struct vm_area_struct *vma; in __rmap_walk_file() local
2917 vma_interval_tree_foreach(vma, &mapping->i_mmap, in __rmap_walk_file()
2919 unsigned long address = vma_address(vma, pgoff_start, nr_pages); in __rmap_walk_file()
2921 VM_BUG_ON_VMA(address == -EFAULT, vma); in __rmap_walk_file()
2924 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in __rmap_walk_file()
2927 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in __rmap_walk_file()
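
The __rmap_walk_file() fragments show the core of every file-side rmap walk: iterate the address_space's i_mmap interval tree, translate the folio's pgoff into a user address in each VMA, and hand the pair to the caller's rmap_one() callback (rmap_walk_anon() does the same over an anon_vma's interval tree). A sketch of that loop; the exact signature and the locked handling are assumptions:

static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
                             pgoff_t pgoff_start, unsigned long nr_pages,
                             struct rmap_walk_control *rwc, bool locked)
{
        pgoff_t pgoff_end = pgoff_start + nr_pages - 1;
        struct vm_area_struct *vma;

        if (!locked)
                i_mmap_lock_read(mapping);

        /* Visit every VMA whose file range overlaps the folio. */
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) {
                unsigned long address = vma_address(vma, pgoff_start, nr_pages);

                VM_BUG_ON_VMA(address == -EFAULT, vma);

                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                        continue;               /* caller filtered this VMA out */

                if (!rwc->rmap_one(folio, vma, address, rwc->arg))
                        break;                  /* caller asked to stop the walk */
                if (rwc->done && rwc->done(folio))
                        break;
        }

        if (!locked)
                i_mmap_unlock_read(mapping);
}
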
2991 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, in hugetlb_add_anon_rmap() argument
3006 struct vm_area_struct *vma, unsigned long address) in hugetlb_add_new_anon_rmap() argument
3010 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugetlb_add_new_anon_rmap()
3015 __folio_set_anon(folio, vma, address, true); in hugetlb_add_new_anon_rmap()