/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"
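
/*
 * Allocate a new anonymous page, fill it with PAGE_SIZE bytes copied
 * from @src_addr, and map it at @dst_addr.  The user copy is first
 * attempted with mmap_sem held; if it faults, the allocated page is
 * handed back via @pagep with -ENOENT so the caller can redo the copy
 * outside the lock and call again with the pre-filled page.
 */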
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}
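
/*
 * Install a pte_special mapping of the empty zero page at @dst_addr.
 * This backs UFFDIO_ZEROPAGE, which needs no source page: reads
 * observe zeroes and, on a writable private vma, the first write
 * faults and COWs a real page.
 */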
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}
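
/*
 * Walk, and allocate if missing, the levels above the pmd for
 * @address, returning the pmd to use or NULL if an allocation failed.
 */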
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we don't run this only when the pmd was missing:
	 * the *pmd may already be established, and it may even be a
	 * trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held; it will release mmap_sem before returning.
 */
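/*
 * Returns the number of bytes copied (always a multiple of the huge
 * page size), or a negative error if nothing was copied.  A -ENOENT
 * from hugetlb_mcopy_atomic_pte() is handled internally: it signals
 * that the source page must be copied without mmap_sem, so the lock is
 * dropped, the copy redone with copy_huge_page_from_user() and the
 * whole operation retried from the vma lookup.
 */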
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	struct hstate *h;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;
		/*
		 * Check the vma is registered in uffd, this is
		 * required to enforce the VM_MAYWRITE check done at
		 * uffd registration time.
		 */
		if (!dst_vma->vm_userfaultfd_ctx.ctx)
			goto out_unlock;

		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
		    (len - copied) & (vma_hpagesize - 1)))
		goto out_unlock;

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	h = hstate_vma(dst_vma);

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);
		VM_BUG_ON(dst_addr & ~huge_page_mask(h));

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
						idx, dst_addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						pages_per_huge_page(h), true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * can not call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page.  In this case, set PagePrivate so that
		 * the global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */
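
/*
 * Fill a single pte worth of memory: dispatch to the anonymous or the
 * shmem implementation based on VM_SHARED.  See the comment inside for
 * why a MAP_PRIVATE shmem mapping takes the anonymous path.
 */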
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage)
{
	ssize_t err;

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away.  The
	 * result generates plain anonymous memory.  So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole.  For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}
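
/*
 * Common driver for UFFDIO_COPY and UFFDIO_ZEROPAGE: validate the
 * destination range, then fill it one pte at a time.  When
 * mfill_atomic_pte() returns -ENOENT the source page could not be
 * copied with mmap_sem held, so the lock is dropped, the copy is done
 * into the already allocated page and the loop is retried.  Returns
 * the number of bytes filled, or a negative error if no progress was
 * made.
 */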
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage,
					      bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the dst range is both valid and fully within a
	 * single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		goto out_unlock;
	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
						src_start, len, zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved into the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
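
/*
 * Entry points backing the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls.
 * An illustrative userspace sketch (not kernel code, error handling
 * omitted) of the UFFDIO_COPY call that reaches mcopy_atomic():
 *
 *	struct uffdio_copy uffdio_copy = {
 *		.dst = fault_addr & ~(PAGE_SIZE - 1),
 *		.src = (unsigned long) local_page_buf,
 *		.len = PAGE_SIZE,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &uffdio_copy);
 *
 * On return uffdio_copy.copy holds the number of bytes copied or a
 * negative error.  fault_addr and local_page_buf are placeholder
 * names for the faulting address and a local source buffer.
 */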
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
			      mmap_changing);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
}