// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/*
 * Install a newly allocated anonymous page at dst_addr, filled with
 * PAGE_SIZE bytes copied from the userspace address src_addr.  If the
 * copy faults while mmap_lock is held, return -ENOENT and hand the
 * allocated page back through *pagep so the caller can redo the copy
 * without the lock and retry.
 */
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fall back to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
		goto out_release;

	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
	if (dst_vma->vm_flags & VM_WRITE) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	lru_cache_add_inactive_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out_release:
	put_page(page);
	goto out;
}

/*
 * Map the global zero page at dst_addr, provided the PTE is still
 * unpopulated (and, for the shmem MAP_PRIVATE case, the offset is
 * still within i_size).
 */
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we didn't run this because the pmd was missing;
	 * the *pmd may already be established and in turn it may also
	 * be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * cannot call restore_reserve_on_error now as it would
		 * require holding mmap_lock.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation, a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page.  In this case, set PagePrivate so that the
		 * global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage,
						bool wp_copy)
{
	ssize_t err;

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}

static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      bool *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;
	bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * Validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, mcopy_mode);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}

int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp, bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}