Lines Matching defs:vma
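
These matches appear to come from the Linux kernel's mm/mprotect.c: the functions listed below, from can_change_pte_writable() at the bottom of the page-table walk up to do_mprotect_pkey() at the syscall entry point, make up the mprotect(2)/pkey_mprotect(2) implementation.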

43 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
48 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
56 if (pte_needs_soft_dirty_wp(vma, pte))
60 if (userfaultfd_pte_wp(vma, pte))
63 if (!(vma->vm_flags & VM_SHARED)) {
70 page = vm_normal_page(vma, addr, pte);
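
The checks at lines 43-70 decide whether mprotect() may make a PTE writable immediately instead of leaving the upgrade to a later write fault: the VMA must be writable (line 48), soft-dirty and userfaultfd write-protect tracking must not require seeing the fault (lines 56, 60), and for private (!VM_SHARED) mappings the backing page must be a normal, exclusively-owned anonymous page (lines 63-70). A minimal user-space program that exercises this path; whether the PTE actually gets the write bit here or on the next store is a kernel-internal decision the program cannot observe:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        p[0] = 'x';     /* fault in an exclusive anonymous page */
        if (mprotect(p, 4096, PROT_READ)) { perror("mprotect"); return 1; }
        if (mprotect(p, 4096, PROT_READ | PROT_WRITE)) { perror("mprotect"); return 1; }
        /* Private mapping, exclusive anon page, no soft-dirty/uffd-wp
         * obstacles: can_change_pte_writable() can say yes, so the kernel
         * may map the PTE writable here instead of taking a write fault
         * on the store below. */
        p[0] = 'y';
        puts("ok");
        return 0;
    }
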
87 struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
99 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
104 if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
105 atomic_read(&vma->vm_mm->mm_users) == 1)
108 flush_tlb_batched_pending(vma->vm_mm);
128 folio = vm_normal_folio(vma, addr, oldpte);
134 if (is_cow_mapping(vma->vm_flags) &&
169 oldpte = ptep_modify_prot_start(vma, addr, pte);
192 can_change_pte_writable(vma, addr, ptent))
193 ptent = pte_mkwrite(ptent, vma);
195 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
249 pte_clear(vma->vm_mm, addr, pte);
263 set_pte_at(vma->vm_mm, addr, pte, newpte);
277 if (userfaultfd_wp_use_markers(vma)) {
284 set_pte_at(vma->vm_mm, addr, pte,
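
change_pte_range() (starting at line 87) is the leaf of the walk: it maps and locks the PTE page (line 99), special-cases NUMA-hinting updates for single-threaded private mappings (lines 104-105), flushes any batched TLB entries first (line 108), and rewrites each present PTE with a start/modify/commit transaction (lines 169-195), including the opportunistic write upgrade via can_change_pte_writable() (lines 192-193); non-present entries such as swap or migration entries and uffd-wp markers are rewritten with pte_clear()/set_pte_at() instead (lines 249-284). A toy user-space model of the start/commit pattern, under the assumption that a PTE can be modeled as one atomic 64-bit word (all toy_* names are made up):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef _Atomic uint64_t toy_pte_t;

    /* Take the entry out of service and return the old value, in the
     * spirit of ptep_modify_prot_start(): from here on, a walker sees
     * "not present" rather than a half-updated entry. */
    static uint64_t toy_modify_prot_start(toy_pte_t *pte)
    {
        return atomic_exchange(pte, 0);
    }

    /* Publish the new entry in a single store, as
     * ptep_modify_prot_commit() does for the real page table. */
    static void toy_modify_prot_commit(toy_pte_t *pte, uint64_t newval)
    {
        atomic_store(pte, newval);
    }

    int main(void)
    {
        toy_pte_t pte = 0x1003;     /* pretend: pfn 1, present + write */
        uint64_t old = toy_modify_prot_start(&pte);
        toy_modify_prot_commit(&pte, old & ~0x2ULL); /* drop the write bit */
        printf("old=%#llx new=%#llx\n", (unsigned long long)old,
               (unsigned long long)atomic_load(&pte));
        return 0;
    }
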
301 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
309 return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
317 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
324 return userfaultfd_wp_use_markers(vma);
333 #define change_pmd_prepare(vma, pmd, cp_flags) \
336 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
337 if (pte_alloc(vma->vm_mm, pmd)) \
348 #define change_prepare(vma, high, low, addr, cp_flags) \
351 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
352 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
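
pgtable_split_needed() (lines 301-309) flags huge mappings that must be split because userfaultfd write-protect on a non-anonymous VMA has to be tracked per PTE, and pgtable_populate_needed() (lines 317-324) forces page tables to exist even over unpopulated holes, since a uffd-wp marker needs a PTE slot to live in. The change_pmd_prepare()/change_prepare() macros (lines 333-352) do that on-demand allocation on the way down; they are GNU statement-expression macros that evaluate to 0 or an errno. A hypothetical user-space illustration of the same pattern (toy_prepare() and the 512-entry table are stand-ins, not kernel API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Evaluates to 0 or -ENOMEM; allocates the lower table level only
     * when it is actually missing, like change_prepare() does. */
    #define toy_prepare(tablep) ({                              \
            int __err = 0;                                      \
            if (*(tablep) == NULL) {                            \
                    *(tablep) = calloc(512, sizeof(void *));    \
                    if (*(tablep) == NULL)                      \
                            __err = -ENOMEM;                    \
            }                                                   \
            __err;                                              \
    })

    int main(void)
    {
        void **table = NULL;
        int err = toy_prepare(&table);
        printf("err=%d table=%p\n", err, (void *)table);
        free(table);
        return 0;
    }
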
360 struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
375 ret = change_pmd_prepare(vma, pmd, cp_flags);
387 pgtable_split_needed(vma, cp_flags)) {
388 __split_huge_pmd(vma, pmd, addr, false, NULL);
394 ret = change_pmd_prepare(vma, pmd, cp_flags);
400 ret = change_huge_pmd(tlb, vma, pmd,
415 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
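
At the PMD level (lines 360-415) there are three cases: a huge PMD that spans the whole step can be changed in place by change_huge_pmd() (line 400); one that does not, or that pgtable_split_needed() says uffd-wp cannot track as a unit (lines 387-388), is split by __split_huge_pmd() and handled as small pages; and ordinary PMDs simply descend into change_pte_range() (line 415). One way to steer a program down the split path from user space (best effort only: THP allocation and alignment are never guaranteed):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4UL << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        madvise(p, len, MADV_HUGEPAGE); /* request THP; may be refused */
        memset(p, 0xaa, len);           /* fault the range in */
        /* A 4 KiB protection change cannot be represented inside one
         * 2 MiB PMD mapping, so if a THP was faulted in here mprotect()
         * must take the __split_huge_pmd() branch above. */
        if (mprotect(p + len / 2 + 4096, 4096, PROT_READ)) {
            perror("mprotect");
            return 1;
        }
        puts("ok");
        return 0;
    }
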
430 struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
444 ret = change_prepare(vma, pudp, pmd, addr, cp_flags);
457 vma->vm_mm, addr, end);
463 pgtable_split_needed(vma, cp_flags)) {
464 __split_huge_pud(vma, pudp, addr);
467 ret = change_huge_pud(tlb, vma, pudp,
478 pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot,
489 struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
499 ret = change_prepare(vma, p4d, pud, addr, cp_flags);
504 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
512 struct vm_area_struct *vma, unsigned long addr,
515 struct mm_struct *mm = vma->vm_mm;
522 tlb_start_vma(tlb, vma);
525 ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
532 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
536 tlb_end_vma(tlb, vma);
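
change_protection_range() (lines 512-536) is the top of a textbook four-level descent: tlb_start_vma()/tlb_end_vma() (lines 522, 536) bracket the walk for batched TLB invalidation, and each level below (change_p4d_range(), change_pud_range(), change_pmd_range()) clamps `next` to its own entry boundary, recurses on [addr, next), and accumulates the count of changed pages; the PUD level additionally has a huge-PUD case of its own (lines 463-467). A self-contained toy model of the descent, assuming 4 KiB pages and 9 address bits per level (the toy_* and level_* helpers are made up):

    #include <stdio.h>

    /* Clamp addr + one entry at this level to end, like pgd_addr_end()
     * and friends do at each level of the real walk. */
    static unsigned long level_addr_end(unsigned long addr,
                                        unsigned long end, int level)
    {
        unsigned long size = 1UL << (12 + 9 * level);
        unsigned long next = (addr + size) & ~(size - 1);
        return next < end ? next : end;
    }

    /* level 4 = pgd, 3 = p4d, 2 = pud, 1 = pmd, 0 = the PTE loop. */
    static unsigned long toy_change_range(unsigned long addr,
                                          unsigned long end, int level)
    {
        unsigned long pages = 0, next;

        if (level == 0)
            return (end - addr) >> 12;  /* "changed" PTEs */
        do {
            next = level_addr_end(addr, end, level);
            pages += toy_change_range(addr, next, level - 1);
        } while (addr = next, addr != end);
        return pages;
    }

    int main(void)
    {
        unsigned long start = 0x7f0000001000UL, end = 0x7f0000042000UL;
        printf("changed %lu pages\n", toy_change_range(start, end, 4));
        return 0;
    }
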
542 struct vm_area_struct *vma, unsigned long start,
545 pgprot_t newprot = vma->vm_page_prot;
554 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
562 if (is_vm_hugetlb_page(vma))
563 pages = hugetlb_change_protection(vma, start, end, newprot,
566 pages = change_protection_range(tlb, vma, start, end, newprot,
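
change_protection() (lines 542-566) only picks the walker: hugetlb VMAs use their own page-table layout, so is_vm_hugetlb_page() (line 562) routes them to hugetlb_change_protection() rather than the generic descent. A small demonstration; it needs reserved huge pages (vm.nr_hugepages > 0) and fails cleanly without them:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 2UL << 20;     /* one 2 MiB hugetlb page */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED) { perror("mmap(MAP_HUGETLB)"); return 1; }
        p[0] = 1;
        /* This call reaches hugetlb_change_protection(), not the
         * generic change_protection_range() walk. */
        if (mprotect(p, len, PROT_READ)) { perror("mprotect"); return 1; }
        puts("hugetlb mapping is now read-only");
        return 0;
    }
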
604 struct vm_area_struct *vma, struct vm_area_struct **pprev,
607 struct mm_struct *mm = vma->vm_mm;
608 unsigned long oldflags = vma->vm_flags;
614 if (!can_modify_vma(vma))
618 *pprev = vma;
628 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
659 } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
660 !vma->anon_vma) {
664 vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
665 if (IS_ERR(vma)) {
666 error = PTR_ERR(vma);
670 *pprev = vma;
676 vma_start_write(vma);
677 vm_flags_reset(vma, newflags);
678 if (vma_wants_manual_pte_write_upgrade(vma))
680 vma_set_page_prot(vma);
682 change_protection(tlb, vma, start, end, mm_cp_flags);
693 populate_vma_page_range(vma, start, end, NULL);
698 perf_event_mmap(vma);
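
mprotect_fixup() (lines 604-698) does the VMA-level bookkeeping around the walk: it refuses sealed VMAs (line 614), charges memory commit when a private mapping becomes writable and uncharges in the one case where that is provably safe, an anonymous mapping with no anon_vma yet (lines 659-660), lets vma_modify_flags() (line 664) merge or split VMAs so the new flags apply to exactly [start, end), resets vm_flags and vm_page_prot under the VMA write lock (lines 676-680), and pre-faults the range when a private mlock()ed mapping just became writable so COW happens up front (line 693). The split side is easy to see from user space:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    int main(void)
    {
        char *p = mmap(NULL, 3 * 4096UL, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        /* Changing only the middle page gives it flags its neighbours
         * lack, so mprotect_fixup() must split one VMA into three,
         * visible as three adjacent lines in the maps file. */
        if (mprotect(p + 4096, 4096, PROT_READ)) { perror("mprotect"); return 1; }
        system("cat /proc/self/maps");
        return 0;
    }
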
713 struct vm_area_struct *vma, *prev;
752 vma = vma_find(&vmi, end);
754 if (!vma)
758 if (vma->vm_start >= end)
760 start = vma->vm_start;
762 if (!(vma->vm_flags & VM_GROWSDOWN))
765 if (vma->vm_start > start)
768 end = vma->vm_end;
770 if (!(vma->vm_flags & VM_GROWSUP))
776 if (start > vma->vm_start)
777 prev = vma;
781 tmp = vma->vm_start;
782 for_each_vma_range(vmi, vma, end) {
787 if (vma->vm_start != tmp) {
793 if (rier && (vma->vm_flags & VM_MAYEXEC))
803 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
805 newflags |= (vma->vm_flags & ~mask_off_old_flags);
813 if (map_deny_write_exec(vma, newflags)) {
824 error = security_file_mprotect(vma, reqprot, prot);
828 tmp = vma->vm_end;
832 if (vma->vm_ops && vma->vm_ops->mprotect) {
833 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
838 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
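
do_mprotect_pkey() (lines 713-838) is the syscall-level driver shared by mprotect(2) and pkey_mprotect(2): it finds the first VMA (line 752), stretches the range over VM_GROWSDOWN/VM_GROWSUP stack VMAs (lines 762-770), then walks for_each_vma_range() (line 782), rejecting unmapped holes (line 787), re-adding PROT_EXEC for READ_IMPLIES_EXEC personalities (line 793), applying the per-VMA pkey override (line 803), the deny-write-execute policy (line 813) and the LSM hook (line 824) before handing each VMA to a driver's ->mprotect() (lines 832-833) or to mprotect_fixup() (line 838). The pkey entry point from user space; it needs glibc 2.27+ and protection-key hardware (e.g. x86 MPK) and fails cleanly elsewhere:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        int pkey = pkey_alloc(0, 0);
        if (pkey < 0) { perror("pkey_alloc"); return 1; }
        /* Same kernel entry point as mprotect(), but with a real pkey
         * instead of -1; arch_override_mprotect_pkey() (line 803)
         * picks it up per VMA. */
        if (pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey)) {
            perror("pkey_mprotect");
            return 1;
        }
        puts("pkey attached");
        return pkey_free(pkey);
    }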