// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider a case where both
	 * MISSING|WP are registered, we first wr-protect a none pte (which
	 * has no page cache page backing it), then access the page.
	 */
	if (!pte_none_mostly(*dst_pte))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

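/*
 * Handle MFILL_ATOMIC_COPY for a private destination: allocate a folio, copy
 * the source page from userspace with page faults disabled, and install the
 * pte.  If the copy would fault, return -ENOENT with *foliop set so the
 * caller can redo the copy_from_user() outside the mmap_lock and retry.
 */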
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr, false);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here.  Despite the
		 * mmap_lock being read-recursive, a deadlock is still
		 * possible if a writer has taken a lock.  For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}

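/*
 * Handle MFILL_ATOMIC_ZEROPAGE for a private mapping: install the shared
 * zero page read-only (as a special pte) at dst_addr, after re-checking
 * i_size under the page table lock for the shmem MAP_PRIVATE case.
 */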
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}

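/*
 * Walk (and allocate, if necessary) the page table levels down to the pmd
 * covering @address.  Returns NULL if a level could not be allocated.
 */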
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we didn't run this because the pmd was
	 * missing; the *pmd may already be established and in
	 * turn it may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */

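/*
 * Fill a single pte: dispatch to the continue/copy/zeropage handler that
 * matches the uffd mode and the type of the destination vma.
 */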
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}

static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    atomic_t *mmap_changing,
					    uffd_flags_t flags)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(dst_vma, dst_start,
					    src_start, len, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

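/*
 * The helpers below are the entry points behind the UFFDIO_COPY,
 * UFFDIO_ZEROPAGE and UFFDIO_CONTINUE ioctls: they tag the request with the
 * matching MFILL_ATOMIC_* mode and hand it to mfill_atomic().
 *
 * For illustration only (not part of this file), a minimal userspace
 * UFFDIO_COPY call might look roughly like the sketch below, where uffd,
 * dst_addr, src_buf and page_size are placeholder names and registration,
 * wakeup and error handling are omitted.  dst must be the page-aligned
 * faulting address; on return, copy.copy holds the number of bytes copied
 * or a negative error value:
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst_addr,
 *		.src = (unsigned long) src_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */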
ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  atomic_t *mmap_changing, uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}

ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing,
			      uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}

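/*
 * Apply (enable_wp == true) or clear userfaultfd write protection on the
 * range [start, start + len) of dst_vma via change_protection(), flushing
 * the TLB as needed.
 */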
long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected by default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}

int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failures */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}