Lines Matching defs:vma (the matches below appear to come from the Linux kernel's mm/madvise.c)

46  * Any behaviour which results in changes to the vma->vm_flags needs to
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
96 mmap_assert_locked(vma->vm_mm);
98 return vma->anon_name;
102 static int replace_anon_vma_name(struct vm_area_struct *vma,
105 struct anon_vma_name *orig_name = anon_vma_name(vma);
108 vma->anon_name = NULL;
116 vma->anon_name = anon_vma_name_reuse(anon_name);
122 static int replace_anon_vma_name(struct vm_area_struct *vma,
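
The two definitions matched at lines 102 and 122 are the CONFIG_ANON_VMA_NAME helper and its no-op stub. Below is a condensed sketch of the former, stitched from these matches; anon_vma_name_put() and the exact control flow are recalled from the surrounding source rather than shown above, so treat them as assumptions:

        static int replace_anon_vma_name(struct vm_area_struct *vma,
                                         struct anon_vma_name *anon_name)
        {
                struct anon_vma_name *orig_name = anon_vma_name(vma);

                /* Clearing the name: drop the old reference, if any. */
                if (!anon_name) {
                        vma->anon_name = NULL;
                        anon_vma_name_put(orig_name);
                        return 0;
                }

                /* Same name already set: nothing to do. */
                if (anon_vma_name_eq(orig_name, anon_name))
                        return 0;

                /* Take a reference on the new name, release the old one. */
                vma->anon_name = anon_vma_name_reuse(anon_name);
                anon_vma_name_put(orig_name);

                return 0;
        }
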
132 * Update the vm_flags on region of a vma, splitting it or merging it as
135 * anon_name belongs to a valid vma because this function might free that vma.
137 static int madvise_update_vma(struct vm_area_struct *vma,
142 struct mm_struct *mm = vma->vm_mm;
146 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
147 *prev = vma;
151 vma = vma_modify_flags_name(&vmi, *prev, vma, start, end, new_flags,
153 if (IS_ERR(vma))
154 return PTR_ERR(vma);
156 *prev = vma;
159 vma_start_write(vma);
160 vm_flags_reset(vma, new_flags);
161 if (!vma->vm_file || vma_is_anon_shmem(vma)) {
162 error = replace_anon_vma_name(vma, anon_name);
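
Read together, the madvise_update_vma() matches (lines 137-162) outline the flags-update path: bail out early when nothing changes, let vma_modify_flags_name() split or merge the vma so that [start, end) stands alone, then reset the flags under the vma write lock. A rough sketch of how those lines fit together; vmi stands for the function's VMA_ITERATOR and the surrounding error handling is recalled from memory, so treat it as approximate:

        /* Nothing to do if neither the flags nor the anon name change. */
        if (new_flags == vma->vm_flags &&
            anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
                *prev = vma;
                return 0;
        }

        /* Split or merge so that exactly [start, end) carries new_flags. */
        vma = vma_modify_flags_name(&vmi, *prev, vma, start, end,
                                    new_flags, anon_name);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        *prev = vma;

        /* vm_flags updates require the vma write lock. */
        vma_start_write(vma);
        vm_flags_reset(vma, new_flags);

        /* Anonymous (or anon shmem) mappings may also carry a name. */
        if (!vma->vm_file || vma_is_anon_shmem(vma))
                error = replace_anon_vma_name(vma, anon_name);
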
174 struct vm_area_struct *vma = walk->private;
186 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
202 vma, addr, &splug);
220 static void shmem_swapin_range(struct vm_area_struct *vma,
224 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
225 pgoff_t end_index = linear_page_index(vma, end) - 1;
241 addr = vma->vm_start +
242 ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
247 vma, addr, &splug);
261 static long madvise_willneed(struct vm_area_struct *vma,
265 struct mm_struct *mm = vma->vm_mm;
266 struct file *file = vma->vm_file;
269 *prev = vma;
272 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
278 shmem_swapin_range(vma, start, end, file->f_mapping);
294 * explicitly grab a reference because the vma (and hence the
295 * vma's reference to the file) can go away as soon as we drop
300 offset = (loff_t)(start - vma->vm_start)
301 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
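
The madvise_willneed() matches at lines 294-301 come from its file-backed branch, where the file reference must be pinned before mmap_lock is dropped for vfs_fadvise(). A sketch of that tail; get_file(), vfs_fadvise() and the locking calls are recalled from the surrounding source, not shown in the matches, so treat the exact sequence as an assumption:

        /*
         * Filesystem's fadvise may need to take various locks.  We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_lock.
         */
        *prev = NULL;   /* tell sys_madvise we dropped mmap_lock */
        get_file(file);
        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        mmap_read_unlock(mm);
        vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
        fput(file);
        mmap_read_lock(mm);
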
309 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
311 if (!vma->vm_file)
320 file_inode(vma->vm_file)) ||
321 file_permission(vma->vm_file, MAY_WRITE) == 0;
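
The fragments at lines 309-321 belong to one small predicate: paging out a file-backed mapping is only allowed when the caller could have opened the file for writing, otherwise MADV_PAGEOUT on shared pagecache becomes a side channel. Approximately (the inode_owner_or_capable() call and its idmap argument are recalled rather than matched, and may differ by kernel version):

        static inline bool can_do_file_pageout(struct vm_area_struct *vma)
        {
                if (!vma->vm_file)
                        return false;
                /* Owner/privileged, or plain write permission on the file. */
                return inode_owner_or_capable(&nop_mnt_idmap,
                                              file_inode(vma->vm_file)) ||
                       file_permission(vma->vm_file, MAY_WRITE) == 0;
        }
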
344 struct vm_area_struct *vma = walk->vma;
356 pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
357 !can_do_file_pageout(vma);
365 ptl = pmd_trans_huge_lock(pmd, vma);
403 pmdp_invalidate(vma, addr, pmd);
434 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
459 folio = vm_normal_folio(vma, addr, ptent);
519 clear_young_dirty_ptes(vma, addr, pte, nr,
562 struct vm_area_struct *vma,
570 tlb_start_vma(tlb, vma);
571 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
572 tlb_end_vma(tlb, vma);
575 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
577 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
580 static long madvise_cold(struct vm_area_struct *vma,
584 struct mm_struct *mm = vma->vm_mm;
587 *prev = vma;
588 if (!can_madv_lru_vma(vma))
593 madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
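
madvise_cold() (and madvise_pageout() just below, which has the same shape) wraps its page-range helper in an mmu_gather so TLB flushing is batched. A rough reconstruction; lru_add_drain(), tlb_gather_mmu() and tlb_finish_mmu() are recalled from the surrounding source rather than matched above:

        static long madvise_cold(struct vm_area_struct *vma,
                                 struct vm_area_struct **prev,
                                 unsigned long start_addr, unsigned long end_addr)
        {
                struct mm_struct *mm = vma->vm_mm;
                struct mmu_gather tlb;

                *prev = vma;
                if (!can_madv_lru_vma(vma))
                        return -EINVAL;

                lru_add_drain();        /* flush per-CPU LRU batches first */
                tlb_gather_mmu(&tlb, mm);
                madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
                tlb_finish_mmu(&tlb);

                return 0;
        }
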
600 struct vm_area_struct *vma,
608 tlb_start_vma(tlb, vma);
609 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
610 tlb_end_vma(tlb, vma);
613 static long madvise_pageout(struct vm_area_struct *vma,
617 struct mm_struct *mm = vma->vm_mm;
620 *prev = vma;
621 if (!can_madv_lru_vma(vma))
630 if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
631 (vma->vm_flags & VM_MAYSHARE)))
636 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
649 struct vm_area_struct *vma = walk->vma;
659 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
696 folio = vm_normal_folio(vma, addr, ptent);
767 clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
789 static int madvise_free_single_vma(struct vm_area_struct *vma,
792 struct mm_struct *mm = vma->vm_mm;
796 /* MADV_FREE works for only anon vma at the moment */
797 if (!vma_is_anonymous(vma))
800 range.start = max(vma->vm_start, start_addr);
801 if (range.start >= vma->vm_end)
803 range.end = min(vma->vm_end, end_addr);
804 if (range.end <= vma->vm_start)
814 tlb_start_vma(&tlb, vma);
815 walk_page_range(vma->vm_mm, range.start, range.end,
817 tlb_end_vma(&tlb, vma);
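
The madvise_free_single_vma() matches (lines 789-817) show the requested range being clamped to the vma before the page-table walk. The surrounding body looks roughly like the sketch below; the mmu-notifier and mmu_gather calls, and the madvise_free_walk_ops name, are recalled from the source rather than matched, so details may differ:

        /* MADV_FREE works for only anon vma at the moment */
        if (!vma_is_anonymous(vma))
                return -EINVAL;

        /* Intersect [start_addr, end_addr) with the vma's extent. */
        range.start = max(vma->vm_start, start_addr);
        if (range.start >= vma->vm_end)
                return -EINVAL;
        range.end = min(vma->vm_end, end_addr);
        if (range.end <= vma->vm_start)
                return -EINVAL;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
                                range.start, range.end);

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
        update_hiwater_rss(mm);

        mmu_notifier_invalidate_range_start(&range);
        tlb_start_vma(&tlb, vma);
        walk_page_range(vma->vm_mm, range.start, range.end,
                        &madvise_free_walk_ops, &tlb);
        tlb_end_vma(&tlb, vma);
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb);

        return 0;
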
843 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
846 zap_page_range_single(vma, start, end - start, NULL);
850 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
855 if (!is_vm_hugetlb_page(vma)) {
861 return !(vma->vm_flags & forbidden);
866 if (start & ~huge_page_mask(hstate_vma(vma)))
875 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));
880 static long madvise_dontneed_free(struct vm_area_struct *vma,
885 struct mm_struct *mm = vma->vm_mm;
887 *prev = vma;
888 if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
894 if (!userfaultfd_remove(vma, start, end)) {
898 vma = vma_lookup(mm, start);
899 if (!vma)
902 * Potential end adjustment for hugetlb vma is OK as
903 * the check below keeps end within vma.
905 if (!madvise_dontneed_free_valid_vma(vma, start, &end,
908 if (end > vma->vm_end) {
910 * Don't fail if end > vma->vm_end. If the old
911 * vma was split while the mmap_lock was
915 * adjacent next vma that we'll walk
918 * end-vma->vm_end range, but the manager can
921 end = vma->vm_end;
927 return madvise_dontneed_single_vma(vma, start, end);
929 return madvise_free_single_vma(vma, start, end);
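
The madvise_dontneed_free() matches (lines 880-929) document why the vma is looked up a second time: userfaultfd_remove() can drop mmap_lock, after which both vma and *prev are stale. Condensed below; the long comment at lines 908-921 is abbreviated to one line, and the dispatch constants at the end are recalled from the source rather than matched:

        *prev = vma;
        if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
                return -EINVAL;

        if (start == end)
                return 0;

        if (!userfaultfd_remove(vma, start, end)) {
                *prev = NULL;   /* mmap_lock has been dropped, prev is stale */

                mmap_read_lock(mm);
                vma = vma_lookup(mm, start);
                if (!vma)
                        return -ENOMEM;
                /*
                 * Potential end adjustment for hugetlb vma is OK as
                 * the check below keeps end within vma.
                 */
                if (!madvise_dontneed_free_valid_vma(vma, start, &end,
                                                     behavior))
                        return -EINVAL;
                /* The old vma may have been split while mmap_lock was down. */
                if (end > vma->vm_end)
                        end = vma->vm_end;
        }

        if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
                return madvise_dontneed_single_vma(vma, start, end);
        else    /* MADV_FREE */
                return madvise_free_single_vma(vma, start, end);
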
975 static long madvise_remove(struct vm_area_struct *vma,
982 struct mm_struct *mm = vma->vm_mm;
986 if (vma->vm_flags & VM_LOCKED)
989 f = vma->vm_file;
995 if (!vma_is_shared_maywrite(vma))
998 offset = (loff_t)(start - vma->vm_start)
999 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
1003 * explicitly grab a reference because the vma (and hence the
1004 * vma's reference to the file) can go away as soon as we drop
1008 if (userfaultfd_remove(vma, start, end)) {
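
madvise_remove() repeats the pin-the-file pattern from madvise_willneed() before punching a hole in the backing file. A sketch of its body; vfs_fallocate() and the FALLOC_* flags are recalled from the surrounding source rather than matched above:

        *prev = NULL;   /* tell sys_madvise we dropped mmap_lock */

        if (vma->vm_flags & VM_LOCKED)
                return -EINVAL;

        f = vma->vm_file;
        if (!f || !f->f_mapping || !f->f_mapping->host)
                return -EINVAL;

        if (!vma_is_shared_maywrite(vma))
                return -EACCES;

        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        /*
         * Filesystem's fallocate may need to take i_rwsem.  We need to
         * explicitly grab a reference because the vma (and hence the
         * vma's reference to the file) can go away as soon as we drop
         * mmap_lock.
         */
        get_file(f);
        if (userfaultfd_remove(vma, start, end)) {
                /* mmap_lock was not released by userfaultfd_remove() */
                mmap_read_unlock(mm);
        }
        error = vfs_fallocate(f, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                              offset, end - start);
        fput(f);
        mmap_read_lock(mm);
        return error;
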
1021 * Apply an madvise behavior to a region of a vma. madvise_update_vma
1025 static int madvise_vma_behavior(struct vm_area_struct *vma,
1032 unsigned long new_flags = vma->vm_flags;
1034 if (unlikely(!can_modify_vma_madv(vma, behavior)))
1039 return madvise_remove(vma, prev, start, end);
1041 return madvise_willneed(vma, prev, start, end);
1043 return madvise_cold(vma, prev, start, end);
1045 return madvise_pageout(vma, prev, start, end);
1049 return madvise_dontneed_free(vma, prev, start, end, behavior);
1063 if (vma->vm_flags & VM_IO)
1069 if (vma->vm_file || vma->vm_flags & VM_SHARED)
1074 if (vma->vm_flags & VM_DROPPABLE)
1082 if ((!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) ||
1083 (vma->vm_flags & VM_DROPPABLE))
1089 error = ksm_madvise(vma, start, end, behavior, &new_flags);
1095 error = hugepage_madvise(vma, &new_flags, behavior);
1100 return madvise_collapse(vma, prev, start, end);
1103 anon_name = anon_vma_name(vma);
1105 error = madvise_update_vma(vma, prev, start, end, new_flags,
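
The comment matched at line 135 ("anon_name belongs to a valid vma because this function might free that vma") is satisfied here in the caller: madvise_vma_behavior() takes its own reference on the name around the update. Roughly (anon_vma_name_get()/anon_vma_name_put() are recalled from the source, not matched above):

        anon_name = anon_vma_name(vma);
        anon_vma_name_get(anon_name);
        error = madvise_update_vma(vma, prev, start, end, new_flags,
                                   anon_name);
        anon_vma_name_put(anon_name);
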
1227 * between the current vma and the original range. Any unmapped regions in the
1235 int (*visit)(struct vm_area_struct *vma,
1239 struct vm_area_struct *vma;
1249 vma = find_vma_prev(mm, start, &prev);
1250 if (vma && start > vma->vm_start)
1251 prev = vma;
1257 if (!vma)
1260 /* Here start < (end|vma->vm_end). */
1261 if (start < vma->vm_start) {
1263 start = vma->vm_start;
1268 /* Here vma->vm_start <= start < (end|vma->vm_end) */
1269 tmp = vma->vm_end;
1273 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1274 error = visit(vma, &prev, start, tmp, arg);
1283 vma = find_vma(mm, prev->vm_end);
1285 vma = find_vma(mm, start);
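
The final large cluster (lines 1227-1285) is madvise_walk_vmas(), the loop that clips the requested range against each vma and hands one clamped chunk at a time to the visit() callback. An abridged sketch; the unmapped_error bookkeeping and the exact return value are recalled from the source and may differ slightly:

        vma = find_vma_prev(mm, start, &prev);
        if (vma && start > vma->vm_start)
                prev = vma;

        for (;;) {
                if (!vma)
                        return -ENOMEM;

                /* Here start < (end|vma->vm_end). */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;      /* hole in the range */
                        start = vma->vm_start;
                        if (start >= end)
                                break;
                }

                /* Here vma->vm_start <= start < (end|vma->vm_end) */
                tmp = vma->vm_end;
                if (end < tmp)
                        tmp = end;

                /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
                error = visit(vma, &prev, start, tmp, arg);
                if (error)
                        return error;
                start = tmp;
                if (prev && start < prev->vm_end)
                        start = prev->vm_end;
                if (start >= end)
                        break;
                if (prev)
                        vma = find_vma(mm, prev->vm_end);
                else    /* the visit() callback may have dropped mmap_lock */
                        vma = find_vma(mm, start);
        }

        return unmapped_error;
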
1292 static int madvise_vma_anon_name(struct vm_area_struct *vma,
1300 if (vma->vm_file && !vma_is_anon_shmem(vma))
1303 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,