/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -EFAULT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}
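
/*
 * Install a pte pointing at the architecture's shared zero page for
 * @dst_addr.  Fails with -EEXIST if a pte is already present, mirroring
 * the race check in the mcopy path above.
 */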
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}
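
/*
 * Walk the page tables for @address, allocating the pud and pmd levels
 * as needed.  Returns the pmd pointer, or NULL if an allocation failed.
 */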
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (pud)
		/*
		 * Note that we don't necessarily run this because the
		 * pmd was missing: the *pmd may already be established
		 * and in turn it may also be a trans_huge_pmd.
		 */
		pmd = pmd_alloc(mm, pud, address);
	return pmd;
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held; it will release mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	struct hstate *h;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;
		/*
		 * Only allow __mcopy_atomic_hugetlb on userfaultfd
		 * registered ranges.
		 */
		if (!dst_vma->vm_userfaultfd_ctx.ctx)
			goto out_unlock;

		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
		    (len - copied) & (vma_hpagesize - 1)))
		goto out_unlock;

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	h = hstate_vma(dst_vma);
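
	/*
	 * Copy one huge page per iteration.  Each pass serializes against
	 * concurrent faults on the same index via hugetlb_fault_mutex and
	 * bails out with -EEXIST once a destination pte is populated.  On
	 * -EFAULT the copy is redone outside mmap_sem, see below.
	 */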
	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);
		VM_BUG_ON(dst_addr & ~huge_page_mask(h));

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
						idx, dst_addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -EFAULT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						pages_per_huge_page(h), true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}
out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * can not call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation, a reservation entry is
		 * added to the reservation map and PagePrivate will not be
		 * set.  When the page is freed, the global reserve count
		 * will NOT be incremented and it will appear as though we
		 * have leaked a reserved page.  In this case, set
		 * PagePrivate so that the global reserve count will be
		 * incremented to match the reservation map entry which was
		 * created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma
		 * could be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */
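
/*
 * Fill @len bytes at @dst_start in @dst_mm, either copying from
 * @src_start or, when @zeropage is true, installing zero pages.
 * Returns the number of bytes mapped, or an error if nothing could be
 * mapped at all.
 */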
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		goto out_unlock;
	/*
	 * Be strict and only allow __mcopy_atomic on userfaultfd
	 * registered ranges to prevent userland errors going
	 * unnoticed.  As far as the VM consistency is concerned, it
	 * would be perfectly safe to remove this check, but there's
	 * no useful usage for __mcopy_atomic outside of userfaultfd
	 * registered ranges.  This is after all why these are ioctls
	 * belonging to the userfaultfd and not syscalls.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
						src_start, len, zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;
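
	/*
	 * Copy one page per iteration.  mcopy_atomic_pte copies with a
	 * non-sleeping kmap_atomic; if that fails with -EFAULT we drop
	 * mmap_sem, redo the copy with a sleeping kmap/copy_from_user
	 * and retry with the already-filled page.
	 */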
	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		if (vma_is_anonymous(dst_vma)) {
			if (!zeropage)
				err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
						       dst_addr, src_addr,
						       &page);
			else
				err = mfill_zeropage_pte(dst_mm, dst_pmd,
							 dst_vma, dst_addr);
		} else {
			err = -EINVAL; /* if zeropage is true return -EINVAL */
			if (likely(!zeropage))
				err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
							     dst_vma, dst_addr,
							     src_addr, &page);
		}

		cond_resched();

		if (unlikely(err == -EFAULT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}
out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true);
}
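
/*
 * Illustrative userspace counterpart (not part of this file): the two
 * entry points above back the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls
 * declared in the uapi header <linux/userfaultfd.h>.  A minimal sketch,
 * assuming a userfaultfd "uffd" already registered over the destination
 * range; dst_addr, src_buf, len and handle_error() are caller-side
 * placeholders:
 *
 *	struct uffdio_copy copy = {
 *		.dst = (__u64) dst_addr,
 *		.src = (__u64) src_buf,
 *		.len = len,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		handle_error();
 *
 * On a partial operation copy.copy reports the bytes actually mapped (or
 * a negated error), mirroring the "copied ? copied : err" return
 * convention of __mcopy_atomic above.
 */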