
Searched refs:src_vma (Results 1 – 8 of 8) sorted by relevance

/linux/mm/
userfaultfd.c
1043 static struct folio *check_ptes_for_batched_move(struct vm_area_struct *src_vma, in check_ptes_for_batched_move() argument
1058 folio = vm_normal_folio(src_vma, src_addr, orig_src_pte); in check_ptes_for_batched_move()
1074 struct vm_area_struct *src_vma, in move_present_ptes() argument
1089 flush_cache_range(src_vma, src_addr, src_end); in move_present_ptes()
1137 src_folio = check_ptes_for_batched_move(src_vma, src_addr, in move_present_ptes()
1145 flush_tlb_range(src_vma, src_start, src_addr); in move_present_ptes()
1216 struct vm_area_struct *src_vma, in move_swap_pte()
1234 ptep_clear_flush(src_vma, src_addr, src_pte); in move_zeropage_pte()
1249 struct vm_area_struct *src_vma,
1337 ret = move_zeropage_pte(mm, dst_vma, src_vma, in move_pages_ptes()
1220 move_zeropage_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long dst_addr, unsigned long src_addr, pte_t *dst_pte, pte_t *src_pte, pte_t orig_dst_pte, pte_t orig_src_pte, pmd_t *dst_pmd, pmd_t dst_pmdval, spinlock_t *dst_ptl, spinlock_t *src_ptl) move_zeropage_pte() argument
1253 move_pages_ptes(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long dst_addr, unsigned long src_addr, unsigned long len, __u64 mode) move_pages_ptes() argument
1541 validate_move_areas(struct userfaultfd_ctx *ctx, struct vm_area_struct *src_vma, struct vm_area_struct *dst_vma) validate_move_areas() argument
1671 uffd_move_unlock(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) uffd_move_unlock() argument
1696 uffd_move_unlock(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) uffd_move_unlock() argument
1774 struct vm_area_struct *src_vma, *dst_vma; move_pages() local
[all …]
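
Note: the userfaultfd.c hits above are the kernel side of the UFFDIO_MOVE ioctl: move_pages() locks src_vma and dst_vma, then move_pages_ptes() remaps present, swap, and zeropage PTEs from the source range into the destination instead of copying the data. A minimal userspace sketch of driving this path (assumptions: a 6.8+ kernel with UFFDIO_MOVE, and uffd is a userfaultfd descriptor whose UFFDIO_API handshake is done and whose destination range is registered with UFFDIO_REGISTER_MODE_MISSING; move_range is a name made up here):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int move_range(int uffd, void *dst, void *src, size_t len)
{
	struct uffdio_move mv = {
		.dst  = (__u64)(unsigned long)dst,
		.src  = (__u64)(unsigned long)src,
		.len  = len,	/* src, dst and len must be page aligned */
		.mode = 0,	/* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv) == -1) {
		perror("UFFDIO_MOVE");
		return -1;
	}
	/* mv.move reports how many bytes were actually moved */
	return mv.move == (__s64)len ? 0 : -1;
}

On success the pages are remapped, not copied, so the source range is left unpopulated.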
memory.c
940 struct vm_area_struct *src_vma, unsigned long addr, int *rss) in copy_nonpresent_pte() argument
1004 folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma); in copy_nonpresent_pte()
1029 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); in copy_nonpresent_pte()
1030 if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte)) in copy_nonpresent_pte()
1060 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, in copy_present_page() argument
1076 if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma)) in copy_present_page()
1096 struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, in __copy_present_ptes() argument
1099 struct mm_struct *src_mm = src_vma->vm_mm; in __copy_present_ptes()
1102 if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) { in __copy_present_ptes()
1108 if (src_vma->vm_flags & VM_SHARED) in __copy_present_ptes()
[all …]
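
Note: the memory.c hits are fork's PTE-copying path, and the check at line 1102 is the heart of copy-on-write. A condensed sketch of that step (simplified to a single PTE; current kernels batch it via wrprotect_ptes()), together with is_cow_mapping() from mm/internal.h that it relies on:

/* mm/internal.h: a mapping is COW if it is private but could be written */
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/* Sketch of the COW step in __copy_present_ptes(), one PTE at a time */
if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
	ptep_set_wrprotect(src_mm, addr, src_pte);	/* parent loses write */
	pte = pte_wrprotect(pte);			/* child starts read-only */
}
if (src_vma->vm_flags & VM_SHARED)
	pte = pte_mkclean(pte);				/* shared mapping: no COW */

The first write from either process then faults, and the fault handler breaks the sharing with a private copy.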
hugetlb.c
4887 struct vm_area_struct *src_vma) in copy_hugetlb_page_range() argument
4892 bool cow = is_cow_mapping(src_vma->vm_flags); in copy_hugetlb_page_range()
4893 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range()
4903 src_vma->vm_start, in copy_hugetlb_page_range()
4904 src_vma->vm_end); in copy_hugetlb_page_range()
4906 vma_assert_write_locked(src_vma); in copy_hugetlb_page_range()
4915 hugetlb_vma_lock_read(src_vma); in copy_hugetlb_page_range()
4919 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
4921 src_pte = hugetlb_walk(src_vma, addr, sz); in copy_hugetlb_page_range()
4943 entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte); in copy_hugetlb_page_range()
[all …]
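
Note: copy_hugetlb_page_range() cannot walk in PAGE_SIZE steps; the stride comes from the VMA's hstate. A simplified sketch of the loop shape visible in the hits above (error handling, locking, and the shared-PMD case omitted):

struct hstate *h = hstate_vma(src_vma);
unsigned long sz = huge_page_size(h);	/* e.g. 2 MiB or 1 GiB */
unsigned long addr;

for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
	pte_t *src_pte = hugetlb_walk(src_vma, addr, sz);

	if (!src_pte)
		continue;	/* no page table here, nothing to copy */
	/* ... allocate dst_pte, take both ptls, copy or share the entry ... */
}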
huge_memory.c
1793 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, in copy_huge_non_present_pmd() argument
1835 dst_vma, src_vma); in copy_huge_non_present_pmd()
1848 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) in copy_huge_pmd() argument
1872 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd)); in copy_huge_pmd()
1894 dst_vma, src_vma, pmd, pgtable); in copy_huge_pmd()
1923 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) { in copy_huge_pmd()
1929 __split_huge_pmd(src_vma, src_pmd, addr, false); in copy_huge_pmd()
2704 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, in move_pages_huge_pmd() argument
2719 vma_assert_locked(src_vma); in move_pages_huge_pmd()
2753 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE); in move_pages_huge_pmd()
[all …]
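
Note: copy_huge_pmd() has a notable failure path visible at lines 1923–1929: if the source folio's anon rmap cannot be duplicated (typically because the page is pinned, e.g. for DMA), the huge PMD is split and fork retries at PTE granularity. Roughly:

if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page,
					 dst_vma, src_vma))) {
	/* Folio may be pinned: back out and split to base pages */
	folio_put(src_folio);
	pte_free(dst_mm, pgtable);
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	__split_huge_pmd(src_vma, src_pmd, addr, false);
	return -EAGAIN;	/* caller falls back to PTE-level copying */
}

move_pages_huge_pmd() (line 2704) is the THP counterpart of the UFFDIO_MOVE path shown under userfaultfd.c above.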
/linux/include/linux/
userfaultfd_k.h
141 struct vm_area_struct *src_vma,
huge_mm.h
13 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
hugetlb.h
336 struct vm_area_struct *src_vma) in copy_hugetlb_page_range() argument
mm.h
2849 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
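
Note: these header declarations wire the pieces together: copy_page_range() (mm.h) is the entry point called from dup_mmap() at fork time, and it dispatches to copy_huge_pmd() (huge_mm.h) for THP entries and copy_hugetlb_page_range() (hugetlb.h) for hugetlb VMAs. A simplified sketch of the dup_mmap() call site:

/* dup_mmap(): tmp is the child's new VMA, mpnt the parent's */
if (!(tmp->vm_flags & VM_WIPEONFORK))
	retval = copy_page_range(tmp, mpnt);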