Lines Matching defs:vma (mm/rmap.c)

151 static void anon_vma_chain_link(struct vm_area_struct *vma,
155 avc->vma = vma;
157 list_add(&avc->same_vma, &vma->anon_vma_chain);
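
The three fragments above (lines 151, 155, 157) all belong to anon_vma_chain_link(). A hedged reconstruction of the whole helper, recalled from mainline mm/rmap.c and possibly differing in detail between versions, shows that a single anon_vma_chain links a vma and an anon_vma in both directions:

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	/* vma side: list of every anon_vma this vma is linked to */
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	/* anon_vma side: interval tree of every vma mapping it */
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
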
163 * @vma: the memory region in question
165 * This makes sure the memory mapping described by 'vma' has
173 * reason for splitting a vma has been mprotect()), or we
176 * Anon-vma allocations are very subtle, because we may have
179 * allocated vma (it depends on RCU to make sure that the
187 int __anon_vma_prepare(struct vm_area_struct *vma)
189 struct mm_struct *mm = vma->vm_mm;
200 anon_vma = find_mergeable_anon_vma(vma);
213 if (likely(!vma->anon_vma)) {
214 vma->anon_vma = anon_vma;
215 anon_vma_chain_link(vma, avc, anon_vma);
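
__anon_vma_prepare() above is only the slow path. The fast-path wrapper lives in include/linux/rmap.h; it is shown here as a hedged sketch, together with a purely illustrative fault-path caller (the return value is an assumption, not a specific call site):

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))	/* already prepared: no locking, no allocation */
		return 0;

	return __anon_vma_prepare(vma);
}

/* Illustrative caller in a write-fault path: */
if (unlikely(anon_vma_prepare(vma)))
	return VM_FAULT_OOM;
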
238 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
239 * have the same vma.
277 * than one child isn't reused even if there was no alive vma, thus rmap
302 * Reuse existing anon_vma if it has no vma and only one
331 * Attach vma to its own anon_vma, as well as to the anon_vmas that
335 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
346 vma->anon_vma = NULL;
352 error = anon_vma_clone(vma, pvma);
357 if (vma->anon_vma)
382 vma->anon_vma = anon_vma;
384 anon_vma_chain_link(vma, avc, anon_vma);
393 unlink_anon_vmas(vma);
397 void unlink_anon_vmas(struct vm_area_struct *vma)
406 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
424 if (vma->anon_vma) {
425 vma->anon_vma->num_active_vmas--;
428 * vma would still be needed after unlink, and anon_vma will be prepared
431 vma->anon_vma = NULL;
440 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
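
For orientation, a hedged sketch of how anon_vma_fork() and unlink_anon_vmas() pair up on the fork path; the surrounding caller is dup_mmap()-style pseudocode, not an exact copy of kernel/fork.c:

struct vm_area_struct *tmp = vm_area_dup(mpnt);	/* child copy of the parent vma */

if (!tmp)
	return -ENOMEM;
/*
 * anon_vma_fork() first anon_vma_clone()s the parent's chain so rmap can
 * still find not-yet-COWed pages through the child, then (unless an old
 * anon_vma was reused, line 357) attaches a fresh child anon_vma
 * (lines 382..384).
 */
if (anon_vma_fork(tmp, mpnt)) {
	vm_area_free(tmp);
	return -ENOMEM;
}
/* ... later, at unmap/exit time, unlink_anon_vmas(tmp) drops every
 * anon_vma_chain entry again (lines 406..440). */
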
772 * At what user virtual address is page expected in vma?
773 * Caller should check the page is actually part of the vma.
775 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
786 if (!vma->anon_vma || !page__anon_vma ||
787 vma->anon_vma->root != page__anon_vma->root)
789 } else if (!vma->vm_file) {
791 } else if (vma->vm_file->f_mapping != folio->mapping) {
797 return vma_address(vma, pgoff, 1);
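
page_address_in_vma() ends in vma_address() (mm/internal.h). A simplified, hedged restatement of that arithmetic follows; the *_sketch name is made up here, and the real helper is more careful about ranges that only partly overlap the vma:

static unsigned long vma_address_sketch(struct vm_area_struct *vma, pgoff_t pgoff)
{
	unsigned long address;

	if (pgoff < vma->vm_pgoff)		/* page sits before this vma's window */
		return -EFAULT;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (address >= vma->vm_end)		/* ... or past its end */
		return -EFAULT;
	return address;
}
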
840 struct vm_area_struct *vma, unsigned long address, void *arg)
843 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
850 if (vma->vm_flags & VM_LOCKED) {
853 mlock_vma_folio(folio, vma);
865 * the range of VM_LOCKED vma. As page reclaim
867 * the range of VM_LOCKED vma.
879 if ((!atomic_read(&vma->vm_mm->mm_users) ||
880 check_stable_address_space(vma->vm_mm)) &&
895 if (ptep_clear_flush_young_notify(vma, address,
899 if (pmdp_clear_flush_young_notify(vma, address,
910 if ((vma->vm_flags & VM_LOCKED) &&
912 folio_within_vma(folio, vma)) {
921 mlock_vma_folio(folio, vma);
934 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
943 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
954 if (!vma_has_recency(vma))
961 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
972 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
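
The two callbacks above are wired into an rmap walk by folio_referenced(). A condensed, hedged version of that setup (field initialisers recalled from mainline; details such as the try_lock/contended handling are omitted):

struct folio_referenced_arg pra = {
	.mapcount = folio_mapcount(folio),
	.memcg = memcg,
};
struct rmap_walk_control rwc = {
	.rmap_one = folio_referenced_one,		/* per-vma accessed-bit test */
	.invalid_vma = invalid_folio_referenced_vma,	/* skip vmas without recency */
	.arg = (void *)&pra,
	.anon_lock = folio_lock_anon_vma_read,
};

rmap_walk(folio, &rwc);
*vm_flags = pra.vm_flags;	/* union of vm_flags of the referencing vmas */
return pra.referenced;
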
1020 struct vm_area_struct *vma = pvmw->vma;
1029 vma->vm_mm, address, vma_address_end(pvmw));
1043 flush_cache_page(vma, address, pte_pfn(entry));
1044 entry = ptep_clear_flush(vma, address, pte);
1047 set_pte_at(vma->vm_mm, address, pte, entry);
1057 flush_cache_range(vma, address,
1059 entry = pmdp_invalidate(vma, address, pmd);
1062 set_pmd_at(vma->vm_mm, address, pmd, entry);
1079 static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
1082 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
1090 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1092 if (vma->vm_flags & VM_SHARED)
1126 * within the @vma of shared mappings. And since clean PTEs
1131 * @vma: vma that @pfn mapped within.
1136 struct vm_area_struct *vma)
1142 .vma = vma,
1146 if (invalid_mkclean_vma(vma, NULL))
1149 pvmw.address = vma_address(vma, pgoff, nr_pages);
1150 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
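
page_mkclean_one() and invalid_mkclean_vma() are plugged into the same rmap-walk machinery by folio_mkclean(); a condensed, hedged sketch (locking asserts and the no-mapping early return are trimmed):

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,		/* page_mkclean_one() counts into this */
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,	/* only shared (VM_SHARED) vmas matter */
	};

	if (!folio_mapped(folio))
		return 0;

	rmap_walk(folio, &rwc);
	return cleaned;
}
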
1206 * @vma: The vma the folio belongs to
1212 void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
1214 void *anon_vma = vma->anon_vma;
1217 VM_BUG_ON_VMA(!anon_vma, vma);
1231 * @vma: VM area to add the folio to.
1235 static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
1238 struct anon_vma *anon_vma = vma->anon_vma;
1243 * If the folio isn't exclusive to this vma, we must use the _oldest_
1257 folio->index = linear_page_index(vma, address);
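
What lines 1238..1257 amount to: for anonymous folios, folio->mapping is a tagged pointer to the anon_vma rather than to an address_space. A hedged restatement of the encoding (constant names as in mainline; the WRITE_ONCE is there because lockless rmap scans read ->mapping concurrently):

if (!exclusive)
	anon_vma = anon_vma->root;	/* shared folio: use the oldest anon_vma */

anon_vma = (void *)anon_vma + PAGE_MAPPING_ANON;	/* low bit tags "anon" */
WRITE_ONCE(folio->mapping, (struct address_space *)anon_vma);
folio->index = linear_page_index(vma, address);	/* pgoff this folio maps at */
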
1264 * @vma: the vm area in which the mapping is added
1268 struct vm_area_struct *vma, unsigned long address)
1281 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
1283 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1310 struct page *page, int nr_pages, struct vm_area_struct *vma,
1320 __page_check_anon_rmap(folio, page, vma, address);
1352 mlock_vma_folio(folio, vma);
1360 * @vma: The vm area in which the mappings are added
1372 int nr_pages, struct vm_area_struct *vma, unsigned long address,
1375 __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
1383 * @vma: The vm area in which the mapping is added
1393 struct vm_area_struct *vma, unsigned long address, rmap_t flags)
1396 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
1406 * @vma: the vm area in which the mapping is added
1418 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
1427 VM_BUG_ON_VMA(address < vma->vm_start ||
1428 address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
1434 if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
1436 __folio_set_anon(folio, vma, address, exclusive);
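
A typical caller of folio_add_new_anon_rmap(), condensed from a do_anonymous_page()-style fault path; treat the ordering and the surrounding helpers as illustrative assumptions rather than exact kernel lines:

/* Brand-new folio: nobody else can see it yet, so it is mapped exclusive. */
folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
folio_add_lru_vma(folio, vma);				/* make it visible to reclaim */
set_ptes(vma->vm_mm, addr, pte, entry, nr_pages);	/* only now expose the PTEs */
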
1474 struct page *page, int nr_pages, struct vm_area_struct *vma,
1486 mlock_vma_folio(folio, vma);
1494 * @vma: The vm area in which the mappings are added
1501 int nr_pages, struct vm_area_struct *vma)
1503 __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
1510 * @vma: The vm area in which the mapping is added
1517 struct vm_area_struct *vma)
1520 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
1527 struct page *page, int nr_pages, struct vm_area_struct *vma,
1595 munlock_vma_folio(folio, vma);
1603 * @vma: The vm area from which the mappings are removed
1610 int nr_pages, struct vm_area_struct *vma)
1612 __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
1619 * @vma: The vm area from which the mapping is removed
1626 struct vm_area_struct *vma)
1629 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD);
1638 static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
1641 struct mm_struct *mm = vma->vm_mm;
1642 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1669 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1676 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1680 hsz = huge_page_size(hstate_vma(vma));
1686 * If the folio is in an mlock()d vma, we must not swap it out.
1689 (vma->vm_flags & VM_LOCKED)) {
1692 mlock_vma_folio(folio, vma);
1697 if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
1706 split_huge_pmd_locked(vma, pvmw.address,
1738 flush_cache_range(vma, range.start, range.end);
1752 if (!hugetlb_vma_trylock_write(vma))
1754 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1755 hugetlb_vma_unlock_write(vma);
1756 flush_tlb_range(vma,
1770 hugetlb_vma_unlock_write(vma);
1772 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1774 flush_cache_page(vma, address, pfn);
1789 pteval = ptep_clear_flush(vma, address, pvmw.pte);
1798 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
1818 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1874 (vma->vm_flags & VM_DROPPABLE))) {
1888 if (!(vma->vm_flags & VM_DROPPABLE))
1897 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1944 folio_remove_rmap_pte(folio, subpage, vma);
1945 if (vma->vm_flags & VM_LOCKED)
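
try_to_unmap_one() above is the per-vma callback; its driver, try_to_unmap(), is a thin rmap-walk setup. Recalled from mainline as a hedged sketch:

void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,	/* stop early once fully unmapped */
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}
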
1961 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1963 return vma_is_temporary_stack(vma);
2003 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
2006 struct mm_struct *mm = vma->vm_mm;
2007 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2030 split_huge_pmd_address(vma, address, true, folio);
2041 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2048 adjust_range_if_pmd_sharing_possible(vma, &range.start,
2052 hsz = huge_page_size(hstate_vma(vma));
2109 flush_cache_range(vma, range.start, range.end);
2123 if (!hugetlb_vma_trylock_write(vma)) {
2128 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
2129 hugetlb_vma_unlock_write(vma);
2130 flush_tlb_range(vma,
2146 hugetlb_vma_unlock_write(vma);
2149 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
2151 flush_cache_page(vma, address, pfn);
2166 pteval = ptep_clear_flush(vma, address, pvmw.pte);
2226 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2242 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2312 folio_remove_rmap_pte(folio, subpage, vma);
2313 if (vma->vm_flags & VM_LOCKED)
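
try_to_migrate_one() is driven the same way, with one difference that explains invalid_migration_vma() at line 1961: migration skips temporary exec stacks. A condensed, hedged sketch of try_to_migrate() (flag validation and the zone-device checks are trimmed):

void try_to_migrate(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_migrate_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	/* exec() moves its temporary stack vma without the rmap locks that
	 * migration relies on, so such vmas are skipped entirely. */
	if (!folio_test_ksm(folio) && folio_test_anon(folio))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}
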
2378 struct vm_area_struct *vma, unsigned long address, void *priv)
2380 struct mm_struct *mm = vma->vm_mm;
2381 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
2392 vma->vm_mm, address, min(vma->vm_end,
2413 flush_cache_page(vma, address, pte_pfn(ptent));
2414 pteval = ptep_clear_flush(vma, address, pvmw.pte);
2451 folio_remove_rmap_pte(folio, subpage, vma);
2604 * Find all the mappings of a folio using the mapping pointer and the vma
2628 struct vm_area_struct *vma = avc->vma;
2629 unsigned long address = vma_address(vma, pgoff_start,
2632 VM_BUG_ON_VMA(address == -EFAULT, vma);
2635 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2638 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
2654 * Find all the mappings of a folio using the mapping pointer and the vma chains
2662 struct vm_area_struct *vma;
2689 vma_interval_tree_foreach(vma, &mapping->i_mmap,
2691 unsigned long address = vma_address(vma, pgoff_start,
2694 VM_BUG_ON_VMA(address == -EFAULT, vma);
2697 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2700 if (!rwc->rmap_one(folio, vma, address, rwc->arg))
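
rmap_walk_anon() and rmap_walk_file() above are the two halves of the generic walker; the dispatcher choosing between them (plus the KSM path not visible in this listing) is short enough to quote as a hedged sketch:

void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
	if (folio_test_ksm(folio))
		rmap_walk_ksm(folio, rwc);		/* KSM keeps its own reverse map */
	else if (folio_test_anon(folio))
		rmap_walk_anon(folio, rwc, false);	/* anon_vma interval tree (line 2628) */
	else
		rmap_walk_file(folio, rwc, false);	/* i_mmap interval tree (line 2689) */
}
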
2738 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
2753 struct vm_area_struct *vma, unsigned long address)
2757 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2762 __folio_set_anon(folio, vma, address, true);