// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check that the vma is registered with uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}
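
/*
 * Illustrative note (not kernel code): every dst range handed to the
 * UFFDIO_* paths below must pass the checks above.  For example, with a
 * single 16KB userfaultfd-registered mapping starting at 'base', the
 * pair (base + 4096, 8192) is a valid (dst_start, len), while a range
 * that crosses into a neighbouring vma, or targets an unregistered vma,
 * makes find_dst_vma() return NULL and the operation fail.
 */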
/*
 * Install PTEs to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
{
	int ret;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page->mapping;
	spinlock_t *ptl;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;

	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			lru_cache_add(page);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
		lru_cache_add_inactive_or_unevictable(page, dst_vma);
	}
	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}
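
/*
 * Summary of the permissions installed above (descriptive note, derived
 * from the code in mfill_atomic_install_pte()):
 *
 *   page_in_cache && !VM_SHARED -> never writable: a private mapping of
 *                                  a page-cache page must COW on write
 *   writable && wp_copy         -> uffd-wp marker set (pte_mkuffd_wp())
 *                                  and the pte left write-protected, so
 *                                  the next write faults back into uffd
 *   writable && !wp_copy        -> plain writable pte (pte_mkwrite())
 *   !writable                   -> read-only pte; the dirty bit is still
 *                                  set so the new contents are not lost
 */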
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	put_page(page);
	goto out;
}
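
/*
 * Note on the -ENOENT contract above: kmap_atomic() disables page
 * faults, so if the source page is not resident the copy_from_user()
 * fails fast instead of faulting under mmap_lock.  The caller then
 * drops the lock, redoes the copy with faults enabled, and retries.  A
 * rough sketch of the caller side in __mcopy_atomic() (simplified,
 * error handling omitted):
 *
 *	err = mfill_atomic_pte(..., &page, ...);
 *	if (err == -ENOENT) {
 *		mmap_read_unlock(dst_mm);
 *		copy_from_user(kmap(page), src_addr, PAGE_SIZE);
 *		mmap_read_lock(dst_mm);
 *		goto retry;	// revalidate the vma, reuse 'page'
 *	}
 */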
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
				pmd_t *dst_pmd,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				bool wp_copy)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct page *page;
	int ret;

	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
	if (ret)
		goto out;
	if (!page) {
		ret = -EFAULT;
		goto out;
	}

	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;

	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release:
	unlock_page(page);
	put_page(page);
	goto out;
}
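
/*
 * Descriptive note: mcontinue_atomic_pte() implements UFFDIO_CONTINUE
 * ("minor fault" resolution).  The page is expected to already exist in
 * the shmem page cache (SGP_READ never allocates; a hole yields -EFAULT
 * above), so nothing is copied: only page table entries are installed
 * to map the existing contents.  Compare with UFFDIO_COPY, which
 * allocates a fresh page and fills it from userspace memory.
 */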
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is not necessarily called because the pmd is
	 * missing: *pmd may already be established, and it may even be
	 * a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we can not reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}
	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}
out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */
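
/*
 * Dispatch summary for mfill_atomic_pte() below (descriptive note):
 *
 *   mode == MCOPY_ATOMIC_CONTINUE       -> mcontinue_atomic_pte()
 *   private vma, MCOPY_ATOMIC_NORMAL    -> mcopy_atomic_pte()
 *   private vma, MCOPY_ATOMIC_ZEROPAGE  -> mfill_zeropage_pte()
 *   shared (shmem) vma                  -> shmem_mfill_atomic_pte()
 *
 * hugetlb vmas never reach this function; they are diverted earlier in
 * __mcopy_atomic() to __mcopy_atomic_hugetlb().
 */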
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						enum mcopy_atomic_mode mode,
						bool wp_copy)
{
	ssize_t err;

	if (mode == MCOPY_ATOMIC_CONTINUE) {
		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
					    wp_copy);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away.  The
	 * result generates plain anonymous memory.  So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole.  For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     page);
	}

	return err;
}
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      atomic_t *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the dst range is both valid and fully within a
	 * single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					       src_start, len, mcopy_mode);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;
	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP, don't override it
		 * and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
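
/*
 * Userspace view (illustrative sketch, not part of this file): the
 * __mcopy_atomic() path above backs the UFFDIO_COPY ioctl.  A fault
 * handling thread typically does something like the following, where
 * 'uffd', 'fault_addr' and 'local_buf' are assumed to be set up by the
 * application:
 *
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(PAGE_SIZE - 1),	// page-aligned dst
 *		.src  = (unsigned long)local_buf,	// source in our memory
 *		.len  = PAGE_SIZE,
 *		.mode = 0,			// or UFFDIO_COPY_MODE_WP
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) && errno == EAGAIN)
 *		;	// mmap_changing: the caller must retry
 *
 * On partial success copy.copy holds the number of bytes copied,
 * matching the 'return copied ? copied : err' logic above.
 */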
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     atomic_t *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}

int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}
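
/*
 * Userspace view (illustrative sketch, not part of this file):
 * mwriteprotect_range() backs the UFFDIO_WRITEPROTECT ioctl, used with
 * write-protect (VM_UFFD_WP) registrations on anonymous memory, where
 * 'uffd', 'addr' and 'len' are assumed to be set up by the application:
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = addr, .len = len },
 *		.mode  = UFFDIO_WRITEPROTECT_MODE_WP,	// 0 to unprotect
 *	};
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 *
 * Write-protecting maps the range read-only with the uffd-wp bit set
 * (MM_CP_UFFD_WP); clearing the mode resolves a wp fault by restoring
 * the vma's default protection (MM_CP_UFFD_WP_RESOLVE).
 */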