/linux/mm/
userfaultfd.c
    1043  static struct folio *check_ptes_for_batched_move(struct vm_area_struct *src_vma,  (in check_ptes_for_batched_move(), argument)
    1058  folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);  (in check_ptes_for_batched_move())
    1074  struct vm_area_struct *src_vma,  (in move_present_ptes(), argument)
    1089  flush_cache_range(src_vma, src_addr, src_end);  (in move_present_ptes())
    1137  src_folio = check_ptes_for_batched_move(src_vma, src_addr,  (in move_present_ptes())
    1145  flush_tlb_range(src_vma, src_start, src_addr);  (in move_present_ptes())
    1220  struct vm_area_struct *src_vma,  (in move_zeropage_pte(), argument)
    1238  ptep_clear_flush(src_vma, src_addr, src_pte);  (in move_zeropage_pte())
    1253  struct vm_area_struct *src_vma,  (in move_pages_ptes(), argument)
    1341  ret = move_zeropage_pte(mm, dst_vma, src_vma,  (in move_pages_ptes())
    [all …]
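The move_present_ptes() hits trace the batched PTE-move pattern used by UFFDIO_MOVE: flush the cache for the whole source range up front, clear and reinstall each PTE, then issue one TLB flush over the moved range. A minimal sketch of that pattern follows, assuming the caller already holds the PTE locks and has validated both VMAs; pte_move_batch() is a hypothetical name, and folio pinning, retry and error handling are omitted.

    #include <linux/mm.h>

    /* Hypothetical helper sketching the flush/move/flush sequence seen in
     * move_present_ptes(); not the kernel implementation. */
    static void pte_move_batch(struct mm_struct *mm,
                               struct vm_area_struct *src_vma,
                               unsigned long src_start, unsigned long src_end,
                               pte_t *src_pte, unsigned long dst_addr,
                               pte_t *dst_pte)
    {
            unsigned long src_addr;

            /* Writeback/invalidate caches for the source range (line 1089). */
            flush_cache_range(src_vma, src_start, src_end);

            for (src_addr = src_start; src_addr < src_end;
                 src_addr += PAGE_SIZE, dst_addr += PAGE_SIZE,
                 src_pte++, dst_pte++) {
                    /* Clear the source entry and move it to the destination. */
                    pte_t pte = ptep_get_and_clear(mm, src_addr, src_pte);

                    set_pte_at(mm, dst_addr, dst_pte, pte);
            }

            /* One batched TLB flush for everything moved (line 1145). */
            flush_tlb_range(src_vma, src_start, src_end);
    }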
|
memory.c
    927   struct vm_area_struct *src_vma, unsigned long addr, int *rss)  (in copy_nonpresent_pte(), argument)
    991   folio_try_dup_anon_rmap_pte(folio, page, dst_vma, src_vma);  (in copy_nonpresent_pte())
    1016  VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));  (in copy_nonpresent_pte())
    1017  if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))  (in copy_nonpresent_pte())
    1047  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  (in copy_present_page(), argument)
    1063  if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))  (in copy_present_page())
    1083  struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,  (in __copy_present_ptes(), argument)
    1086  struct mm_struct *src_mm = src_vma->vm_mm;  (in __copy_present_ptes())
    1089  if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {  (in __copy_present_ptes())
    1095  if (src_vma->vm_flags & VM_SHARED)  (in __copy_present_ptes())
    [all …]
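The __copy_present_ptes() hits at lines 1089 and 1095 are the heart of fork()'s copy-on-write setup: a writable PTE in a private mapping is write-protected in both parent and child, while a shared mapping is merely marked clean in the child. A simplified single-PTE sketch, with rmap duplication, batching and accounting omitted (copy_one_present_pte_sketch() is an illustrative name, not a kernel function):

    #include <linux/mm.h>

    static void copy_one_present_pte_sketch(struct vm_area_struct *dst_vma,
                                            struct vm_area_struct *src_vma,
                                            pte_t *dst_pte, pte_t *src_pte,
                                            unsigned long addr)
    {
            struct mm_struct *src_mm = src_vma->vm_mm;
            pte_t pte = ptep_get(src_pte);

            /* COW mapping: write-protect the page in both processes so the
             * first write after fork() faults and triggers the copy. */
            if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
                    ptep_set_wrprotect(src_mm, addr, src_pte);
                    pte = pte_wrprotect(pte);
            }

            /* A shared mapping is marked clean in the child. */
            if (src_vma->vm_flags & VM_SHARED)
                    pte = pte_mkclean(pte);
            pte = pte_mkold(pte);

            set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
    }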
|
hugetlb.c
    4866  struct vm_area_struct *src_vma)  (in copy_hugetlb_page_range(), argument)
    4871  bool cow = is_cow_mapping(src_vma->vm_flags);  (in copy_hugetlb_page_range())
    4872  struct hstate *h = hstate_vma(src_vma);  (in copy_hugetlb_page_range())
    4882  src_vma->vm_start,  (in copy_hugetlb_page_range())
    4883  src_vma->vm_end);  (in copy_hugetlb_page_range())
    4885  vma_assert_write_locked(src_vma);  (in copy_hugetlb_page_range())
    4894  hugetlb_vma_lock_read(src_vma);  (in copy_hugetlb_page_range())
    4898  for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {  (in copy_hugetlb_page_range())
    4900  src_pte = hugetlb_walk(src_vma, addr, sz);  (in copy_hugetlb_page_range())
    4922  entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);  (in copy_hugetlb_page_range())
    [all …]
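Taken together, the copy_hugetlb_page_range() hits outline its walk: look up the hstate, take the VMA lock appropriate to the mapping type, then stride through the source VMA in huge-page units, reading each entry with huge_ptep_get(). A skeleton under those assumptions; the actual entry duplication, mmu-notifier and error paths are omitted, and the function name is illustrative.

    #include <linux/hugetlb.h>

    /* Skeleton of the walk in copy_hugetlb_page_range(); illustrative only. */
    static void copy_hugetlb_range_sketch(struct vm_area_struct *src_vma)
    {
            struct hstate *h = hstate_vma(src_vma);
            unsigned long sz = huge_page_size(h);
            bool cow = is_cow_mapping(src_vma->vm_flags);
            unsigned long addr;

            if (cow)
                    vma_assert_write_locked(src_vma);       /* line 4885 */
            else
                    hugetlb_vma_lock_read(src_vma);         /* line 4894 */

            for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
                    pte_t *src_pte = hugetlb_walk(src_vma, addr, sz);
                    pte_t entry;

                    if (!src_pte)
                            continue;       /* no page table here, skip ahead */
                    entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
                    (void)entry;    /* ... duplicate into the destination ... */
            }

            if (!cow)
                    hugetlb_vma_unlock_read(src_vma);
    }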
|
huge_memory.c
    1793  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  (in copy_huge_non_present_pmd(), argument)
    1835  dst_vma, src_vma);  (in copy_huge_non_present_pmd())
    1848  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)  (in copy_huge_pmd(), argument)
    1872  VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));  (in copy_huge_pmd())
    1894  dst_vma, src_vma, pmd, pgtable);  (in copy_huge_pmd())
    1923  if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, dst_vma, src_vma))) {  (in copy_huge_pmd())
    1929  __split_huge_pmd(src_vma, src_pmd, addr, false);  (in copy_huge_pmd())
    2704  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,  (in move_pages_huge_pmd(), argument)
    2719  vma_assert_locked(src_vma);  (in move_pages_huge_pmd())
    2753  flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);  (in move_pages_huge_pmd())
    [all …]
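The copy_huge_pmd() hits at lines 1923 and 1929 show the dup-or-split decision for anonymous huge pages: if the source folio's rmap cannot be duplicated (typically because the folio may be DMA-pinned in a COW mapping), the huge PMD is split so the PTE-level path can copy page by page. A sketch of just that decision, with locking, the pgtable deposit and statistics left out (dup_huge_pmd_sketch() is an illustrative name):

    #include <linux/huge_mm.h>
    #include <linux/rmap.h>

    static int dup_huge_pmd_sketch(struct folio *src_folio,
                                   struct page *src_page,
                                   struct vm_area_struct *dst_vma,
                                   struct vm_area_struct *src_vma,
                                   pmd_t *src_pmd, unsigned long addr)
    {
            if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page,
                                                     dst_vma, src_vma))) {
                    /* The folio may be DMA-pinned: split the huge PMD and
                     * let the PTE-level copy path COW the pages one by one. */
                    __split_huge_pmd(src_vma, src_pmd, addr, false);
                    return -EAGAIN;
            }
            return 0;
    }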
|
/linux/include/linux/
rmap.h
    635  struct vm_area_struct *src_vma, enum pgtable_level level)  (in __folio_try_dup_anon_rmap(), argument)
    652  unlikely(folio_needs_cow_for_dma(src_vma, folio));  (in __folio_try_dup_anon_rmap())
    724  struct vm_area_struct *src_vma)  (in folio_try_dup_anon_rmap_ptes(), argument)
    727  src_vma, PGTABLE_LEVEL_PTE);  (in folio_try_dup_anon_rmap_ptes())
    732  struct vm_area_struct *src_vma)  (in folio_try_dup_anon_rmap_pte(), argument)
    734  return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,  (in folio_try_dup_anon_rmap_pte())
    763  struct vm_area_struct *src_vma)  (in folio_try_dup_anon_rmap_pmd(), argument)
    767  src_vma, PGTABLE_LEVEL_PMD);  (in folio_try_dup_anon_rmap_pmd())
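These wrappers funnel into __folio_try_dup_anon_rmap(), where the hit at line 652 captures the policy: duplication is refused when folio_needs_cow_for_dma() says the folio may be pinned, forcing the caller to copy the page rather than share it. A hedged sketch of a PTE-level caller reacting to that failure; the function name and the -EAGAIN convention are illustrative, loosely mirroring the fallback to copy_present_page() in mm/memory.c.

    #include <linux/rmap.h>

    static int copy_one_pte_sketch(struct folio *folio, struct page *page,
                                   struct vm_area_struct *dst_vma,
                                   struct vm_area_struct *src_vma)
    {
            /* Fails if the folio may be DMA-pinned in a COW mapping. */
            if (unlikely(folio_try_dup_anon_rmap_pte(folio, page,
                                                     dst_vma, src_vma)))
                    return -EAGAIN; /* caller must allocate and copy instead */

            /* Shared successfully: both PTEs now reference 'page'. */
            return 0;
    }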
|
userfaultfd_k.h
    141  struct vm_area_struct *src_vma,
|
huge_mm.h
    13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
|
hugetlb.h
    331  struct vm_area_struct *src_vma)  (in copy_hugetlb_page_range(), argument)
|
mm.h
    2635  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
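copy_page_range() is the entry point that ties these listings together: dup_mmap() calls it once per VMA at fork() time, with the child's VMA as dst_vma and the parent's as src_vma, and it fans out to the PTE, PMD and hugetlb copy paths above. A heavily simplified sketch of that call site follows; dup_vma_sketch() stands in for vm_area_dup() plus insertion into the child mm and is hypothetical.

    #include <linux/mm.h>

    static int dup_mmap_sketch(struct mm_struct *mm, struct mm_struct *oldmm)
    {
            struct vm_area_struct *src_vma;
            VMA_ITERATOR(vmi, oldmm, 0);
            int ret = 0;

            for_each_vma(vmi, src_vma) {
                    /* dup_vma_sketch() is hypothetical: duplicate the VMA
                     * and link it into the child mm. */
                    struct vm_area_struct *dst_vma = dup_vma_sketch(mm, src_vma);

                    if (!dst_vma)
                            return -ENOMEM;
                    /* Copy the parent's page tables into the child; COW
                     * write-protection happens inside for private mappings. */
                    ret = copy_page_range(dst_vma, src_vma);
                    if (ret)
                            break;
            }
            return ret;
    }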
|