/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -EFAULT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);
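	/*
	 * Charge the new page to the memcg of the destination mm before
	 * mapping it; if the charge fails, the page is freed via
	 * out_release below.
	 */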
	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}

static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
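	/*
	 * Walk the page table hierarchy from the pgd down, allocating
	 * any missing intermediate levels on the way to the pmd.
	 */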
	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we're not running this because the pmd is known to
	 * be missing: the *pmd may already be established and in turn
	 * it may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
 * called with mmap_sem held, it will release mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	struct hstate *h;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb. A PMD_SIZE huge page may exist as used
	 * by THP. Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set. If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;
		/*
		 * Only allow __mcopy_atomic_hugetlb on userfaultfd
		 * registered ranges.
		 */
		if (!dst_vma->vm_userfaultfd_ctx.ctx)
			goto out_unlock;

		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
		    (len - copied) & (vma_hpagesize - 1)))
		goto out_unlock;

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	h = hstate_vma(dst_vma);
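	/*
	 * Copy one huge page per iteration. Each destination index is
	 * serialized against concurrent hugetlb faults through
	 * hugetlb_fault_mutex_table.
	 */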
	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);
		VM_BUG_ON(dst_addr & ~huge_page_mask(h));

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
						idx, dst_addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		vm_alloc_shared = vm_shared;

		cond_resched();
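		/*
		 * -EFAULT means the copy faulted while mmap_sem was held:
		 * drop the lock, redo the copy with page faults enabled,
		 * then take the lock back and retry from the top.
		 */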
		if (unlikely(err == -EFAULT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						pages_per_huge_page(h), true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings. See the routine
		 * restore_reserve_on_error for details. Unfortunately, we
		 * cannot call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation. If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations. When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map. In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed. This is the desired
		 * behavior. However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page. In this case, set PagePrivate so that the
		 * global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated. dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */
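/*
 * Route a single page fill to the implementation matching the vma type:
 * anonymous memory uses mcopy_atomic_pte()/mfill_zeropage_pte() above,
 * while shmem-backed vmas use the shmem_*() variants.
 */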
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage)
{
	ssize_t err;

	if (vma_is_anonymous(dst_vma)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}

static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage,
					      bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
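	/*
	 * The fill loop below runs with mmap_sem held for read. If the
	 * atomic copy of a source page faults, the lock is dropped, the
	 * page is filled with page faults enabled, and control jumps
	 * back here to revalidate the destination vma from scratch.
	 */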
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		goto out_unlock;
	/*
	 * Be strict and only allow __mcopy_atomic on userfaultfd
	 * registered ranges to prevent userland errors going
	 * unnoticed. As far as the VM consistency is concerned, it
	 * would be perfectly safe to remove this check, but there's
	 * no useful usage for __mcopy_atomic outside of userfaultfd
	 * registered ranges. This is after all why these are ioctls
	 * belonging to the userfaultfd and not syscalls.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
						src_start, len, zeropage);
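	/*
	 * Past this point only anonymous and shmem vmas are supported;
	 * any other backing store is rejected.
	 */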
	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
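		/*
		 * The pmd may be empty: allocate a page table for it.
		 * __pte_alloc rechecks under the page table lock, so a
		 * racing allocation of the same pmd is harmless.
		 */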
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage);
		cond_resched();

		if (unlikely(err == -EFAULT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
			      mmap_changing);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
}
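/*
 * Usage sketch (illustrative, not part of this file): mcopy_atomic() and
 * mfill_zeropage() back the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls of a
 * userfaultfd file descriptor. A userspace monitor resolving a missing
 * page fault would typically do something like the following, where uffd,
 * fault_addr, src_buf and page_size are assumed to be set up by the
 * monitor:
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long) src_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno == EAGAIN)
 *		;	/* mmap_changing was set: retry the ioctl later */
 *
 * The -EAGAIN produced by __mcopy_atomic when *mmap_changing is set
 * surfaces to userspace as errno EAGAIN, asking it to retry once the
 * concurrent non-cooperative event (e.g. mremap) has been handled.
 */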