// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

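/*
 * Editorial note: every caller of find_dst_vma() and of the fill/install
 * helpers below in this file holds mmap_lock in read mode across the
 * lookup and for as long as the returned vma is used, as find_vma()
 * requires.
 */
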
/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
{
	int ret;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page->mapping;
	spinlock_t *ptl;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;

	/*
	 * Always mark a PTE as write-protected when needed, regardless of
	 * VM_WRITE, which the user might change.
	 */
	if (wp_copy) {
		_dst_pte = pte_mkuffd_wp(_dst_pte);
		writable = false;
	}

	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte);
	else
		/*
		 * We need this to make sure the write bit is removed, as
		 * mk_pte() could return a pte with the write bit set.
		 */
		_dst_pte = pte_wrprotect(_dst_pte);

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider the case when both
	 * MISSING|WP are registered; we first wr-protect a none pte which
	 * has no page cache page backing it, then access the page.
	 */
	if (!pte_none_mostly(*dst_pte))
		goto out_unlock;

	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			lru_cache_add(page);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		lru_cache_add_inactive_or_unevictable(page, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

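/*
 * For reference, a summary of the write-permission logic above: the
 * installed PTE ends up writable only when all of the following hold:
 *
 *  - the vma has VM_WRITE;
 *  - the page is not a page cache page mapped into a private vma
 *    (page_in_cache && !vm_shared clears writable, so a later write
 *    fault can COW);
 *  - no uffd-wp protection was requested (wp_copy clears writable and
 *    sets the uffd-wp bit instead).
 *
 * In every other case the PTE is explicitly wrprotect()ed.
 */
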
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}

		flush_dcache_page(page);
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	put_page(page);
	goto out;
}

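/*
 * Handles UFFDIO_ZEROPAGE for anonymous memory and for the MAP_PRIVATE
 * shmem case: no page is allocated, a special PTE pointing at the
 * shared zeropage is installed instead.
 */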
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

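/*
 * UFFDIO_CONTINUE resolves a minor fault: the page is expected to
 * already exist in the shmem page cache (typically populated through a
 * second mapping of the same file), so nothing is copied here; the page
 * is only looked up and PTEs are installed for it.
 */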
/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
				pmd_t *dst_pmd,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				bool wp_copy)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct page *page;
	int ret;

	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
	if (ret)
		goto out;
	if (!page) {
		ret = -EFAULT;
		goto out;
	}

	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;

	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release:
	unlock_page(page);
	put_page(page);
	goto out;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is not necessarily run because the pmd was
	 * missing: *pmd may already be established, and it may even be
	 * a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine
 * is called with mmap_lock held; it will release mmap_lock before
 * returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
						      struct vm_area_struct *dst_vma,
						      unsigned long dst_start,
						      unsigned long src_start,
						      unsigned long len,
						      enum mcopy_atomic_mode mode,
						      bool wp_copy)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size.
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

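	/*
	 * Copy one huge page per iteration.  If hugetlb_mcopy_atomic_pte()
	 * returns -ENOENT, the source could not be copied atomically:
	 * drop mmap_lock, copy from userspace via
	 * copy_huge_page_from_user(), then retake the lock and retry,
	 * revalidating dst_vma from scratch.
	 */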
	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page,
					       wp_copy);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode,
				      bool wp_copy);
#endif /* CONFIG_HUGETLB_PAGE */

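/*
 * Dispatch a single-page fill request to the right backend: CONTINUE is
 * handled by mcontinue_atomic_pte() (shmem only), private memory is
 * filled directly here, and shared (shmem) vmas are delegated to
 * shmem_mfill_atomic_pte().
 */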
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						enum mcopy_atomic_mode mode,
						bool wp_copy)
{
	ssize_t err;

	if (mode == MCOPY_ATOMIC_CONTINUE) {
		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
					    wp_copy);
	}

	/*
	 * The normal page fault path for a shmem mapping will invoke the
	 * fault, fill the hole in the file and COW it right away.  The
	 * result generates plain anonymous memory.  So when we are asked
	 * to fill a hole in a MAP_PRIVATE shmem mapping, we'll generate
	 * anonymous memory directly without actually filling the hole.
	 * For the MAP_PRIVATE case the robustness check only happens in
	 * the pagetable (to verify it's still none) and not in the radix
	 * tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     wp_copy, page);
	}

	return err;
}

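/*
 * Common implementation of UFFDIO_COPY, UFFDIO_ZEROPAGE and
 * UFFDIO_CONTINUE.  Returns the number of bytes filled; only if nothing
 * was filled does it return a negative errno, so a short count tells
 * the caller to retry the remainder.
 */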
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      atomic_t *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine.
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, mcopy_mode,
					      wp_copy);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_page(page);
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

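/*
 * For context, an illustrative (and purely hypothetical) userspace
 * sequence that reaches mcopy_atomic() below via the UFFDIO_COPY ioctl;
 * fault_addr, page_size and scratch_page are assumptions of this
 * sketch, not kernel symbols:
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long)scratch_page,
 *		.len = page_size,
 *		.mode = 0,	// or UFFDIO_COPY_MODE_WP
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno != EAGAIN)
 *		err(1, "UFFDIO_COPY");
 *
 * EAGAIN corresponds to the mmap_changing check above: a non-cooperative
 * event is in flight and userspace is expected to retry.
 */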
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     atomic_t *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}

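/*
 * Implements UFFDIO_WRITEPROTECT: set or clear the uffd-wp protection
 * over a range of anonymous, private, uffd-wp registered memory.
 * Unlike __mcopy_atomic() there is no partial progress to report: the
 * return value is 0 on success or a negative errno.
 */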
737ffd05793SShaohua Li */ 738ffd05793SShaohua Li if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) 739ffd05793SShaohua Li goto out_unlock; 740ffd05793SShaohua Li if (!userfaultfd_wp(dst_vma)) 741ffd05793SShaohua Li goto out_unlock; 742ffd05793SShaohua Li if (!vma_is_anonymous(dst_vma)) 743ffd05793SShaohua Li goto out_unlock; 744ffd05793SShaohua Li 745ffd05793SShaohua Li if (enable_wp) 746ffd05793SShaohua Li newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE)); 747ffd05793SShaohua Li else 748ffd05793SShaohua Li newprot = vm_get_page_prot(dst_vma->vm_flags); 749ffd05793SShaohua Li 7504a18419fSNadav Amit tlb_gather_mmu(&tlb, dst_mm); 7514a18419fSNadav Amit change_protection(&tlb, dst_vma, start, start + len, newprot, 752ffd05793SShaohua Li enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE); 7534a18419fSNadav Amit tlb_finish_mmu(&tlb); 754ffd05793SShaohua Li 755ffd05793SShaohua Li err = 0; 756ffd05793SShaohua Li out_unlock: 757d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm); 758ffd05793SShaohua Li return err; 759ffd05793SShaohua Li } 760