120c8ccb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2c1a4de99SAndrea Arcangeli /* 3c1a4de99SAndrea Arcangeli * mm/userfaultfd.c 4c1a4de99SAndrea Arcangeli * 5c1a4de99SAndrea Arcangeli * Copyright (C) 2015 Red Hat, Inc. 6c1a4de99SAndrea Arcangeli */ 7c1a4de99SAndrea Arcangeli 8c1a4de99SAndrea Arcangeli #include <linux/mm.h> 9174cd4b1SIngo Molnar #include <linux/sched/signal.h> 10c1a4de99SAndrea Arcangeli #include <linux/pagemap.h> 11c1a4de99SAndrea Arcangeli #include <linux/rmap.h> 12c1a4de99SAndrea Arcangeli #include <linux/swap.h> 13c1a4de99SAndrea Arcangeli #include <linux/swapops.h> 14c1a4de99SAndrea Arcangeli #include <linux/userfaultfd_k.h> 15c1a4de99SAndrea Arcangeli #include <linux/mmu_notifier.h> 1660d4d2d2SMike Kravetz #include <linux/hugetlb.h> 1726071cedSMike Rapoport #include <linux/shmem_fs.h> 18c1a4de99SAndrea Arcangeli #include <asm/tlbflush.h> 194a18419fSNadav Amit #include <asm/tlb.h> 20c1a4de99SAndrea Arcangeli #include "internal.h" 21c1a4de99SAndrea Arcangeli 22643aa36eSWei Yang static __always_inline 23643aa36eSWei Yang struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm, 24643aa36eSWei Yang unsigned long dst_start, 25643aa36eSWei Yang unsigned long len) 26643aa36eSWei Yang { 27643aa36eSWei Yang /* 28643aa36eSWei Yang * Make sure that the dst range is both valid and fully within a 29643aa36eSWei Yang * single existing vma. 30643aa36eSWei Yang */ 31643aa36eSWei Yang struct vm_area_struct *dst_vma; 32643aa36eSWei Yang 33643aa36eSWei Yang dst_vma = find_vma(dst_mm, dst_start); 34686ea6e6SZhangPeng if (!range_in_vma(dst_vma, dst_start, dst_start + len)) 35643aa36eSWei Yang return NULL; 36643aa36eSWei Yang 37643aa36eSWei Yang /* 38643aa36eSWei Yang * Check the vma is registered in uffd, this is required to 39643aa36eSWei Yang * enforce the VM_MAYWRITE check done at uffd registration 40643aa36eSWei Yang * time. 41643aa36eSWei Yang */ 42643aa36eSWei Yang if (!dst_vma->vm_userfaultfd_ctx.ctx) 43643aa36eSWei Yang return NULL; 44643aa36eSWei Yang 45643aa36eSWei Yang return dst_vma; 46643aa36eSWei Yang } 47643aa36eSWei Yang 48435cdb41SAxel Rasmussen /* Check if dst_addr is outside of file's size. Must be called with ptl held. */ 49435cdb41SAxel Rasmussen static bool mfill_file_over_size(struct vm_area_struct *dst_vma, 50435cdb41SAxel Rasmussen unsigned long dst_addr) 51435cdb41SAxel Rasmussen { 52435cdb41SAxel Rasmussen struct inode *inode; 53435cdb41SAxel Rasmussen pgoff_t offset, max_off; 54435cdb41SAxel Rasmussen 55435cdb41SAxel Rasmussen if (!dst_vma->vm_file) 56435cdb41SAxel Rasmussen return false; 57435cdb41SAxel Rasmussen 58435cdb41SAxel Rasmussen inode = dst_vma->vm_file->f_inode; 59435cdb41SAxel Rasmussen offset = linear_page_index(dst_vma, dst_addr); 60435cdb41SAxel Rasmussen max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 61435cdb41SAxel Rasmussen return offset >= max_off; 62435cdb41SAxel Rasmussen } 63435cdb41SAxel Rasmussen 6415313257SAxel Rasmussen /* 6515313257SAxel Rasmussen * Install PTEs, to map dst_addr (within dst_vma) to page. 6615313257SAxel Rasmussen * 677d64ae3aSAxel Rasmussen * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem 687d64ae3aSAxel Rasmussen * and anon, and for both shared and private VMAs. 
6915313257SAxel Rasmussen */ 7061c50040SAxel Rasmussen int mfill_atomic_install_pte(pmd_t *dst_pmd, 7115313257SAxel Rasmussen struct vm_area_struct *dst_vma, 7215313257SAxel Rasmussen unsigned long dst_addr, struct page *page, 73d9712937SAxel Rasmussen bool newly_allocated, uffd_flags_t flags) 7415313257SAxel Rasmussen { 7515313257SAxel Rasmussen int ret; 7661c50040SAxel Rasmussen struct mm_struct *dst_mm = dst_vma->vm_mm; 7715313257SAxel Rasmussen pte_t _dst_pte, *dst_pte; 7815313257SAxel Rasmussen bool writable = dst_vma->vm_flags & VM_WRITE; 7915313257SAxel Rasmussen bool vm_shared = dst_vma->vm_flags & VM_SHARED; 8093b0d917SPeter Xu bool page_in_cache = page_mapping(page); 8115313257SAxel Rasmussen spinlock_t *ptl; 8228965f0fSVishal Moola (Oracle) struct folio *folio; 8315313257SAxel Rasmussen 8415313257SAxel Rasmussen _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 859ae0f87dSPeter Xu _dst_pte = pte_mkdirty(_dst_pte); 8615313257SAxel Rasmussen if (page_in_cache && !vm_shared) 8715313257SAxel Rasmussen writable = false; 888ee79edfSPeter Xu if (writable) 89161e393cSRick Edgecombe _dst_pte = pte_mkwrite(_dst_pte, dst_vma); 90d9712937SAxel Rasmussen if (flags & MFILL_ATOMIC_WP) 91f1eb1bacSPeter Xu _dst_pte = pte_mkuffd_wp(_dst_pte); 9215313257SAxel Rasmussen 933622d3cdSHugh Dickins ret = -EAGAIN; 9415313257SAxel Rasmussen dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 953622d3cdSHugh Dickins if (!dst_pte) 963622d3cdSHugh Dickins goto out; 9715313257SAxel Rasmussen 98435cdb41SAxel Rasmussen if (mfill_file_over_size(dst_vma, dst_addr)) { 9915313257SAxel Rasmussen ret = -EFAULT; 10015313257SAxel Rasmussen goto out_unlock; 10115313257SAxel Rasmussen } 10215313257SAxel Rasmussen 10315313257SAxel Rasmussen ret = -EEXIST; 1048ee79edfSPeter Xu /* 1058ee79edfSPeter Xu * We allow to overwrite a pte marker: consider when both MISSING|WP 1068ee79edfSPeter Xu * registered, we firstly wr-protect a none pte which has no page cache 1078ee79edfSPeter Xu * page backing it, then access the page. 1088ee79edfSPeter Xu */ 109c33c7948SRyan Roberts if (!pte_none_mostly(ptep_get(dst_pte))) 11015313257SAxel Rasmussen goto out_unlock; 11115313257SAxel Rasmussen 11228965f0fSVishal Moola (Oracle) folio = page_folio(page); 113cea86fe2SHugh Dickins if (page_in_cache) { 114cea86fe2SHugh Dickins /* Usually, cache pages are already added to LRU */ 115cea86fe2SHugh Dickins if (newly_allocated) 11628965f0fSVishal Moola (Oracle) folio_add_lru(folio); 1177123e19cSDavid Hildenbrand folio_add_file_rmap_pte(folio, page, dst_vma); 118cea86fe2SHugh Dickins } else { 1192853b66bSMatthew Wilcox (Oracle) folio_add_new_anon_rmap(folio, dst_vma, dst_addr); 12028965f0fSVishal Moola (Oracle) folio_add_lru_vma(folio, dst_vma); 121cea86fe2SHugh Dickins } 12215313257SAxel Rasmussen 12315313257SAxel Rasmussen /* 12415313257SAxel Rasmussen * Must happen after rmap, as mm_counter() checks mapping (via 12515313257SAxel Rasmussen * PageAnon()), which is set by __page_set_anon_rmap(). 
12615313257SAxel Rasmussen */ 12715313257SAxel Rasmussen inc_mm_counter(dst_mm, mm_counter(page)); 12815313257SAxel Rasmussen 12915313257SAxel Rasmussen set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 13015313257SAxel Rasmussen 13115313257SAxel Rasmussen /* No need to invalidate - it was non-present before */ 13215313257SAxel Rasmussen update_mmu_cache(dst_vma, dst_addr, dst_pte); 13315313257SAxel Rasmussen ret = 0; 13415313257SAxel Rasmussen out_unlock: 13515313257SAxel Rasmussen pte_unmap_unlock(dst_pte, ptl); 1363622d3cdSHugh Dickins out: 13715313257SAxel Rasmussen return ret; 13815313257SAxel Rasmussen } 13915313257SAxel Rasmussen 14061c50040SAxel Rasmussen static int mfill_atomic_pte_copy(pmd_t *dst_pmd, 141c1a4de99SAndrea Arcangeli struct vm_area_struct *dst_vma, 142c1a4de99SAndrea Arcangeli unsigned long dst_addr, 143b6ebaedbSAndrea Arcangeli unsigned long src_addr, 144d9712937SAxel Rasmussen uffd_flags_t flags, 145d7be6d7eSZhangPeng struct folio **foliop) 146c1a4de99SAndrea Arcangeli { 14707e6d409SZhangPeng void *kaddr; 148c1a4de99SAndrea Arcangeli int ret; 14907e6d409SZhangPeng struct folio *folio; 150c1a4de99SAndrea Arcangeli 151d7be6d7eSZhangPeng if (!*foliop) { 152c1a4de99SAndrea Arcangeli ret = -ENOMEM; 15307e6d409SZhangPeng folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma, 15407e6d409SZhangPeng dst_addr, false); 15507e6d409SZhangPeng if (!folio) 156c1a4de99SAndrea Arcangeli goto out; 157c1a4de99SAndrea Arcangeli 15807e6d409SZhangPeng kaddr = kmap_local_folio(folio, 0); 1595521de7dSIra Weiny /* 1605521de7dSIra Weiny * The read mmap_lock is held here. Despite the 1615521de7dSIra Weiny * mmap_lock being read recursive a deadlock is still 1625521de7dSIra Weiny * possible if a writer has taken a lock. For example: 1635521de7dSIra Weiny * 1645521de7dSIra Weiny * process A thread 1 takes read lock on own mmap_lock 1655521de7dSIra Weiny * process A thread 2 calls mmap, blocks taking write lock 1665521de7dSIra Weiny * process B thread 1 takes page fault, read lock on own mmap lock 1675521de7dSIra Weiny * process B thread 2 calls mmap, blocks taking write lock 1685521de7dSIra Weiny * process A thread 1 blocks taking read lock on process B 1695521de7dSIra Weiny * process B thread 1 blocks taking read lock on process A 1705521de7dSIra Weiny * 1715521de7dSIra Weiny * Disable page faults to prevent potential deadlock 1725521de7dSIra Weiny * and retry the copy outside the mmap_lock. 
1735521de7dSIra Weiny */ 1745521de7dSIra Weiny pagefault_disable(); 17507e6d409SZhangPeng ret = copy_from_user(kaddr, (const void __user *) src_addr, 176b6ebaedbSAndrea Arcangeli PAGE_SIZE); 1775521de7dSIra Weiny pagefault_enable(); 17807e6d409SZhangPeng kunmap_local(kaddr); 179b6ebaedbSAndrea Arcangeli 180c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */ 181b6ebaedbSAndrea Arcangeli if (unlikely(ret)) { 1829e368259SAndrea Arcangeli ret = -ENOENT; 183d7be6d7eSZhangPeng *foliop = folio; 184b6ebaedbSAndrea Arcangeli /* don't free the page */ 185b6ebaedbSAndrea Arcangeli goto out; 186b6ebaedbSAndrea Arcangeli } 1877c25a0b8SMuchun Song 18807e6d409SZhangPeng flush_dcache_folio(folio); 189b6ebaedbSAndrea Arcangeli } else { 190d7be6d7eSZhangPeng folio = *foliop; 191d7be6d7eSZhangPeng *foliop = NULL; 192b6ebaedbSAndrea Arcangeli } 193c1a4de99SAndrea Arcangeli 194c1a4de99SAndrea Arcangeli /* 19507e6d409SZhangPeng * The memory barrier inside __folio_mark_uptodate makes sure that 196f4f5329dSWei Yang * preceding stores to the page contents become visible before 197c1a4de99SAndrea Arcangeli * the set_pte_at() write. 198c1a4de99SAndrea Arcangeli */ 19907e6d409SZhangPeng __folio_mark_uptodate(folio); 200c1a4de99SAndrea Arcangeli 201c1a4de99SAndrea Arcangeli ret = -ENOMEM; 20207e6d409SZhangPeng if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL)) 203c1a4de99SAndrea Arcangeli goto out_release; 204c1a4de99SAndrea Arcangeli 20561c50040SAxel Rasmussen ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, 20607e6d409SZhangPeng &folio->page, true, flags); 20715313257SAxel Rasmussen if (ret) 20815313257SAxel Rasmussen goto out_release; 209c1a4de99SAndrea Arcangeli out: 210c1a4de99SAndrea Arcangeli return ret; 211c1a4de99SAndrea Arcangeli out_release: 21207e6d409SZhangPeng folio_put(folio); 213c1a4de99SAndrea Arcangeli goto out; 214c1a4de99SAndrea Arcangeli } 215c1a4de99SAndrea Arcangeli 21661c50040SAxel Rasmussen static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, 217c1a4de99SAndrea Arcangeli struct vm_area_struct *dst_vma, 218c1a4de99SAndrea Arcangeli unsigned long dst_addr) 219c1a4de99SAndrea Arcangeli { 220c1a4de99SAndrea Arcangeli pte_t _dst_pte, *dst_pte; 221c1a4de99SAndrea Arcangeli spinlock_t *ptl; 222c1a4de99SAndrea Arcangeli int ret; 223c1a4de99SAndrea Arcangeli 224c1a4de99SAndrea Arcangeli _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), 225c1a4de99SAndrea Arcangeli dst_vma->vm_page_prot)); 2263622d3cdSHugh Dickins ret = -EAGAIN; 22761c50040SAxel Rasmussen dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl); 2283622d3cdSHugh Dickins if (!dst_pte) 2293622d3cdSHugh Dickins goto out; 230435cdb41SAxel Rasmussen if (mfill_file_over_size(dst_vma, dst_addr)) { 231e2a50c1fSAndrea Arcangeli ret = -EFAULT; 232e2a50c1fSAndrea Arcangeli goto out_unlock; 233e2a50c1fSAndrea Arcangeli } 234e2a50c1fSAndrea Arcangeli ret = -EEXIST; 235c33c7948SRyan Roberts if (!pte_none(ptep_get(dst_pte))) 236c1a4de99SAndrea Arcangeli goto out_unlock; 23761c50040SAxel Rasmussen set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte); 238c1a4de99SAndrea Arcangeli /* No need to invalidate - it was non-present before */ 239c1a4de99SAndrea Arcangeli update_mmu_cache(dst_vma, dst_addr, dst_pte); 240c1a4de99SAndrea Arcangeli ret = 0; 241c1a4de99SAndrea Arcangeli out_unlock: 242c1a4de99SAndrea Arcangeli pte_unmap_unlock(dst_pte, ptl); 2433622d3cdSHugh Dickins out: 244c1a4de99SAndrea Arcangeli return ret; 245c1a4de99SAndrea Arcangeli } 246c1a4de99SAndrea Arcangeli 
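/*
 * Illustrative userspace sketch (not part of this file; variable names
 * are assumptions): the copy and zeropage helpers above are the kernel
 * side of the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls.  A fault-handling
 * thread typically resolves a missing fault with something like:
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(PAGE_SIZE - 1),
 *		.src = (unsigned long)src_buf,
 *		.len = PAGE_SIZE,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * or, to map the shared zero page instead of copying data:
 *
 *	struct uffdio_zeropage zero = {
 *		.range = { .start = fault_addr & ~(PAGE_SIZE - 1), .len = PAGE_SIZE },
 *	};
 *	ioctl(uffd, UFFDIO_ZEROPAGE, &zero);
 */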
24715313257SAxel Rasmussen /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */ 24861c50040SAxel Rasmussen static int mfill_atomic_pte_continue(pmd_t *dst_pmd, 24915313257SAxel Rasmussen struct vm_area_struct *dst_vma, 25015313257SAxel Rasmussen unsigned long dst_addr, 251d9712937SAxel Rasmussen uffd_flags_t flags) 25215313257SAxel Rasmussen { 25315313257SAxel Rasmussen struct inode *inode = file_inode(dst_vma->vm_file); 25415313257SAxel Rasmussen pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 25512acf4fbSMatthew Wilcox (Oracle) struct folio *folio; 25615313257SAxel Rasmussen struct page *page; 25715313257SAxel Rasmussen int ret; 25815313257SAxel Rasmussen 25912acf4fbSMatthew Wilcox (Oracle) ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC); 26012acf4fbSMatthew Wilcox (Oracle) /* Our caller expects us to return -EFAULT if we failed to find folio */ 26173f37dbcSAxel Rasmussen if (ret == -ENOENT) 26273f37dbcSAxel Rasmussen ret = -EFAULT; 26315313257SAxel Rasmussen if (ret) 26415313257SAxel Rasmussen goto out; 26512acf4fbSMatthew Wilcox (Oracle) if (!folio) { 26615313257SAxel Rasmussen ret = -EFAULT; 26715313257SAxel Rasmussen goto out; 26815313257SAxel Rasmussen } 26915313257SAxel Rasmussen 27012acf4fbSMatthew Wilcox (Oracle) page = folio_file_page(folio, pgoff); 271a7605426SYang Shi if (PageHWPoison(page)) { 272a7605426SYang Shi ret = -EIO; 273a7605426SYang Shi goto out_release; 274a7605426SYang Shi } 275a7605426SYang Shi 27661c50040SAxel Rasmussen ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, 277d9712937SAxel Rasmussen page, false, flags); 27815313257SAxel Rasmussen if (ret) 27915313257SAxel Rasmussen goto out_release; 28015313257SAxel Rasmussen 28112acf4fbSMatthew Wilcox (Oracle) folio_unlock(folio); 28215313257SAxel Rasmussen ret = 0; 28315313257SAxel Rasmussen out: 28415313257SAxel Rasmussen return ret; 28515313257SAxel Rasmussen out_release: 28612acf4fbSMatthew Wilcox (Oracle) folio_unlock(folio); 28712acf4fbSMatthew Wilcox (Oracle) folio_put(folio); 28815313257SAxel Rasmussen goto out; 28915313257SAxel Rasmussen } 29015313257SAxel Rasmussen 291fc71884aSAxel Rasmussen /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */ 292fc71884aSAxel Rasmussen static int mfill_atomic_pte_poison(pmd_t *dst_pmd, 293fc71884aSAxel Rasmussen struct vm_area_struct *dst_vma, 294fc71884aSAxel Rasmussen unsigned long dst_addr, 295fc71884aSAxel Rasmussen uffd_flags_t flags) 296fc71884aSAxel Rasmussen { 297fc71884aSAxel Rasmussen int ret; 298fc71884aSAxel Rasmussen struct mm_struct *dst_mm = dst_vma->vm_mm; 299fc71884aSAxel Rasmussen pte_t _dst_pte, *dst_pte; 300fc71884aSAxel Rasmussen spinlock_t *ptl; 301fc71884aSAxel Rasmussen 302fc71884aSAxel Rasmussen _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 303597425dfSHugh Dickins ret = -EAGAIN; 304fc71884aSAxel Rasmussen dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 305597425dfSHugh Dickins if (!dst_pte) 306597425dfSHugh Dickins goto out; 307fc71884aSAxel Rasmussen 308fc71884aSAxel Rasmussen if (mfill_file_over_size(dst_vma, dst_addr)) { 309fc71884aSAxel Rasmussen ret = -EFAULT; 310fc71884aSAxel Rasmussen goto out_unlock; 311fc71884aSAxel Rasmussen } 312fc71884aSAxel Rasmussen 313fc71884aSAxel Rasmussen ret = -EEXIST; 314fc71884aSAxel Rasmussen /* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). 
*/ 315afccb080SRyan Roberts if (!pte_none(ptep_get(dst_pte))) 316fc71884aSAxel Rasmussen goto out_unlock; 317fc71884aSAxel Rasmussen 318fc71884aSAxel Rasmussen set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 319fc71884aSAxel Rasmussen 320fc71884aSAxel Rasmussen /* No need to invalidate - it was non-present before */ 321fc71884aSAxel Rasmussen update_mmu_cache(dst_vma, dst_addr, dst_pte); 322fc71884aSAxel Rasmussen ret = 0; 323fc71884aSAxel Rasmussen out_unlock: 324fc71884aSAxel Rasmussen pte_unmap_unlock(dst_pte, ptl); 325597425dfSHugh Dickins out: 326fc71884aSAxel Rasmussen return ret; 327fc71884aSAxel Rasmussen } 328fc71884aSAxel Rasmussen 329c1a4de99SAndrea Arcangeli static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) 330c1a4de99SAndrea Arcangeli { 331c1a4de99SAndrea Arcangeli pgd_t *pgd; 332c2febafcSKirill A. Shutemov p4d_t *p4d; 333c1a4de99SAndrea Arcangeli pud_t *pud; 334c1a4de99SAndrea Arcangeli 335c1a4de99SAndrea Arcangeli pgd = pgd_offset(mm, address); 336c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, address); 337c2febafcSKirill A. Shutemov if (!p4d) 338c2febafcSKirill A. Shutemov return NULL; 339c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, address); 340c2febafcSKirill A. Shutemov if (!pud) 341c2febafcSKirill A. Shutemov return NULL; 342c1a4de99SAndrea Arcangeli /* 343c1a4de99SAndrea Arcangeli * Note that we didn't run this because the pmd was 344c1a4de99SAndrea Arcangeli * missing, the *pmd may be already established and in 345c1a4de99SAndrea Arcangeli * turn it may also be a trans_huge_pmd. 346c1a4de99SAndrea Arcangeli */ 347c2febafcSKirill A. Shutemov return pmd_alloc(mm, pud, address); 348c1a4de99SAndrea Arcangeli } 349c1a4de99SAndrea Arcangeli 35060d4d2d2SMike Kravetz #ifdef CONFIG_HUGETLB_PAGE 35160d4d2d2SMike Kravetz /* 352a734991cSAxel Rasmussen * mfill_atomic processing for HUGETLB vmas. Note that this routine is 353c1e8d7c6SMichel Lespinasse * called with mmap_lock held, it will release mmap_lock before returning. 35460d4d2d2SMike Kravetz */ 35561c50040SAxel Rasmussen static __always_inline ssize_t mfill_atomic_hugetlb( 35660d4d2d2SMike Kravetz struct vm_area_struct *dst_vma, 35760d4d2d2SMike Kravetz unsigned long dst_start, 35860d4d2d2SMike Kravetz unsigned long src_start, 35960d4d2d2SMike Kravetz unsigned long len, 36067695f18SLokesh Gidra atomic_t *mmap_changing, 361d9712937SAxel Rasmussen uffd_flags_t flags) 36260d4d2d2SMike Kravetz { 36361c50040SAxel Rasmussen struct mm_struct *dst_mm = dst_vma->vm_mm; 3641c9e8defSMike Kravetz int vm_shared = dst_vma->vm_flags & VM_SHARED; 36560d4d2d2SMike Kravetz ssize_t err; 36660d4d2d2SMike Kravetz pte_t *dst_pte; 36760d4d2d2SMike Kravetz unsigned long src_addr, dst_addr; 36860d4d2d2SMike Kravetz long copied; 3690169fd51SZhangPeng struct folio *folio; 37060d4d2d2SMike Kravetz unsigned long vma_hpagesize; 37160d4d2d2SMike Kravetz pgoff_t idx; 37260d4d2d2SMike Kravetz u32 hash; 37360d4d2d2SMike Kravetz struct address_space *mapping; 37460d4d2d2SMike Kravetz 37560d4d2d2SMike Kravetz /* 37660d4d2d2SMike Kravetz * There is no default zero huge page for all huge page sizes as 37760d4d2d2SMike Kravetz * supported by hugetlb. A PMD_SIZE huge pages may exist as used 37860d4d2d2SMike Kravetz * by THP. Since we can not reliably insert a zero page, this 37960d4d2d2SMike Kravetz * feature is not supported. 
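 * (Userspace can still get a zero-filled hugetlb page here by issuing
 * UFFDIO_COPY from a zero-filled source buffer; only the zeropage
 * shortcut is rejected below with -EINVAL.)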
38060d4d2d2SMike Kravetz */ 3818a13897fSAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) { 382d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm); 38360d4d2d2SMike Kravetz return -EINVAL; 38460d4d2d2SMike Kravetz } 38560d4d2d2SMike Kravetz 38660d4d2d2SMike Kravetz src_addr = src_start; 38760d4d2d2SMike Kravetz dst_addr = dst_start; 38860d4d2d2SMike Kravetz copied = 0; 3890169fd51SZhangPeng folio = NULL; 39060d4d2d2SMike Kravetz vma_hpagesize = vma_kernel_pagesize(dst_vma); 39160d4d2d2SMike Kravetz 39260d4d2d2SMike Kravetz /* 39360d4d2d2SMike Kravetz * Validate alignment based on huge page size 39460d4d2d2SMike Kravetz */ 39560d4d2d2SMike Kravetz err = -EINVAL; 39660d4d2d2SMike Kravetz if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1)) 39760d4d2d2SMike Kravetz goto out_unlock; 39860d4d2d2SMike Kravetz 39960d4d2d2SMike Kravetz retry: 40060d4d2d2SMike Kravetz /* 401c1e8d7c6SMichel Lespinasse * On routine entry dst_vma is set. If we had to drop mmap_lock and 40260d4d2d2SMike Kravetz * retry, dst_vma will be set to NULL and we must lookup again. 40360d4d2d2SMike Kravetz */ 40460d4d2d2SMike Kravetz if (!dst_vma) { 40527d02568SMike Rapoport err = -ENOENT; 406643aa36eSWei Yang dst_vma = find_dst_vma(dst_mm, dst_start, len); 40760d4d2d2SMike Kravetz if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) 40860d4d2d2SMike Kravetz goto out_unlock; 4091c9e8defSMike Kravetz 41027d02568SMike Rapoport err = -EINVAL; 41127d02568SMike Rapoport if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) 41227d02568SMike Rapoport goto out_unlock; 41327d02568SMike Rapoport 4141c9e8defSMike Kravetz vm_shared = dst_vma->vm_flags & VM_SHARED; 41560d4d2d2SMike Kravetz } 41660d4d2d2SMike Kravetz 41760d4d2d2SMike Kravetz /* 4181c9e8defSMike Kravetz * If not shared, ensure the dst_vma has a anon_vma. 41960d4d2d2SMike Kravetz */ 42060d4d2d2SMike Kravetz err = -ENOMEM; 4211c9e8defSMike Kravetz if (!vm_shared) { 42260d4d2d2SMike Kravetz if (unlikely(anon_vma_prepare(dst_vma))) 42360d4d2d2SMike Kravetz goto out_unlock; 4241c9e8defSMike Kravetz } 42560d4d2d2SMike Kravetz 42660d4d2d2SMike Kravetz while (src_addr < src_start + len) { 42760d4d2d2SMike Kravetz BUG_ON(dst_addr >= dst_start + len); 42860d4d2d2SMike Kravetz 42960d4d2d2SMike Kravetz /* 43040549ba8SMike Kravetz * Serialize via vma_lock and hugetlb_fault_mutex. 43140549ba8SMike Kravetz * vma_lock ensures the dst_pte remains valid even 43240549ba8SMike Kravetz * in the case of shared pmds. fault mutex prevents 43340549ba8SMike Kravetz * races with other faulting threads. 
43460d4d2d2SMike Kravetz */ 435c0d0381aSMike Kravetz idx = linear_page_index(dst_vma, dst_addr); 4363a47c54fSMike Kravetz mapping = dst_vma->vm_file->f_mapping; 437188b04a7SWei Yang hash = hugetlb_fault_mutex_hash(mapping, idx); 43860d4d2d2SMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]); 43940549ba8SMike Kravetz hugetlb_vma_lock_read(dst_vma); 44060d4d2d2SMike Kravetz 44160d4d2d2SMike Kravetz err = -ENOMEM; 442aec44e0fSPeter Xu dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize); 44360d4d2d2SMike Kravetz if (!dst_pte) { 44440549ba8SMike Kravetz hugetlb_vma_unlock_read(dst_vma); 44560d4d2d2SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 44660d4d2d2SMike Kravetz goto out_unlock; 44760d4d2d2SMike Kravetz } 44860d4d2d2SMike Kravetz 449d9712937SAxel Rasmussen if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) && 4506041c691SPeter Xu !huge_pte_none_mostly(huge_ptep_get(dst_pte))) { 45160d4d2d2SMike Kravetz err = -EEXIST; 45240549ba8SMike Kravetz hugetlb_vma_unlock_read(dst_vma); 45360d4d2d2SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 45460d4d2d2SMike Kravetz goto out_unlock; 45560d4d2d2SMike Kravetz } 45660d4d2d2SMike Kravetz 457d9712937SAxel Rasmussen err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr, 4580169fd51SZhangPeng src_addr, flags, &folio); 45960d4d2d2SMike Kravetz 46040549ba8SMike Kravetz hugetlb_vma_unlock_read(dst_vma); 46160d4d2d2SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 46260d4d2d2SMike Kravetz 46360d4d2d2SMike Kravetz cond_resched(); 46460d4d2d2SMike Kravetz 4659e368259SAndrea Arcangeli if (unlikely(err == -ENOENT)) { 466d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm); 4670169fd51SZhangPeng BUG_ON(!folio); 46860d4d2d2SMike Kravetz 4690169fd51SZhangPeng err = copy_folio_from_user(folio, 470e87340caSZhangPeng (const void __user *)src_addr, true); 47160d4d2d2SMike Kravetz if (unlikely(err)) { 47260d4d2d2SMike Kravetz err = -EFAULT; 47360d4d2d2SMike Kravetz goto out; 47460d4d2d2SMike Kravetz } 475d8ed45c5SMichel Lespinasse mmap_read_lock(dst_mm); 47667695f18SLokesh Gidra /* 47767695f18SLokesh Gidra * If memory mappings are changing because of non-cooperative 47867695f18SLokesh Gidra * operation (e.g. 
mremap) running in parallel, bail out and 47967695f18SLokesh Gidra * request the user to retry later 48067695f18SLokesh Gidra */ 48167695f18SLokesh Gidra if (mmap_changing && atomic_read(mmap_changing)) { 48267695f18SLokesh Gidra err = -EAGAIN; 48367695f18SLokesh Gidra break; 48467695f18SLokesh Gidra } 48560d4d2d2SMike Kravetz 48660d4d2d2SMike Kravetz dst_vma = NULL; 48760d4d2d2SMike Kravetz goto retry; 48860d4d2d2SMike Kravetz } else 4890169fd51SZhangPeng BUG_ON(folio); 49060d4d2d2SMike Kravetz 49160d4d2d2SMike Kravetz if (!err) { 49260d4d2d2SMike Kravetz dst_addr += vma_hpagesize; 49360d4d2d2SMike Kravetz src_addr += vma_hpagesize; 49460d4d2d2SMike Kravetz copied += vma_hpagesize; 49560d4d2d2SMike Kravetz 49660d4d2d2SMike Kravetz if (fatal_signal_pending(current)) 49760d4d2d2SMike Kravetz err = -EINTR; 49860d4d2d2SMike Kravetz } 49960d4d2d2SMike Kravetz if (err) 50060d4d2d2SMike Kravetz break; 50160d4d2d2SMike Kravetz } 50260d4d2d2SMike Kravetz 50360d4d2d2SMike Kravetz out_unlock: 504d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm); 50560d4d2d2SMike Kravetz out: 5060169fd51SZhangPeng if (folio) 5070169fd51SZhangPeng folio_put(folio); 50860d4d2d2SMike Kravetz BUG_ON(copied < 0); 50960d4d2d2SMike Kravetz BUG_ON(err > 0); 51060d4d2d2SMike Kravetz BUG_ON(!copied && !err); 51160d4d2d2SMike Kravetz return copied ? copied : err; 51260d4d2d2SMike Kravetz } 51360d4d2d2SMike Kravetz #else /* !CONFIG_HUGETLB_PAGE */ 51460d4d2d2SMike Kravetz /* fail at build time if gcc attempts to use this */ 51561c50040SAxel Rasmussen extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma, 51660d4d2d2SMike Kravetz unsigned long dst_start, 51760d4d2d2SMike Kravetz unsigned long src_start, 51860d4d2d2SMike Kravetz unsigned long len, 51967695f18SLokesh Gidra atomic_t *mmap_changing, 520d9712937SAxel Rasmussen uffd_flags_t flags); 52160d4d2d2SMike Kravetz #endif /* CONFIG_HUGETLB_PAGE */ 52260d4d2d2SMike Kravetz 52361c50040SAxel Rasmussen static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd, 5243217d3c7SMike Rapoport struct vm_area_struct *dst_vma, 5253217d3c7SMike Rapoport unsigned long dst_addr, 5263217d3c7SMike Rapoport unsigned long src_addr, 527d9712937SAxel Rasmussen uffd_flags_t flags, 528d7be6d7eSZhangPeng struct folio **foliop) 5293217d3c7SMike Rapoport { 5303217d3c7SMike Rapoport ssize_t err; 5313217d3c7SMike Rapoport 532d9712937SAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) { 53361c50040SAxel Rasmussen return mfill_atomic_pte_continue(dst_pmd, dst_vma, 534d9712937SAxel Rasmussen dst_addr, flags); 535fc71884aSAxel Rasmussen } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { 536fc71884aSAxel Rasmussen return mfill_atomic_pte_poison(dst_pmd, dst_vma, 537fc71884aSAxel Rasmussen dst_addr, flags); 53815313257SAxel Rasmussen } 53915313257SAxel Rasmussen 5405b51072eSAndrea Arcangeli /* 5415b51072eSAndrea Arcangeli * The normal page fault path for a shmem will invoke the 5425b51072eSAndrea Arcangeli * fault, fill the hole in the file and COW it right away. The 5435b51072eSAndrea Arcangeli * result generates plain anonymous memory. So when we are 5445b51072eSAndrea Arcangeli * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll 5455b51072eSAndrea Arcangeli * generate anonymous memory directly without actually filling 5465b51072eSAndrea Arcangeli * the hole. For the MAP_PRIVATE case the robustness check 5475b51072eSAndrea Arcangeli * only happens in the pagetable (to verify it's still none) 5485b51072eSAndrea Arcangeli * and not in the radix tree. 
5495b51072eSAndrea Arcangeli */ 5505b51072eSAndrea Arcangeli if (!(dst_vma->vm_flags & VM_SHARED)) { 551d9712937SAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) 55261c50040SAxel Rasmussen err = mfill_atomic_pte_copy(dst_pmd, dst_vma, 553d9712937SAxel Rasmussen dst_addr, src_addr, 554d7be6d7eSZhangPeng flags, foliop); 5553217d3c7SMike Rapoport else 55661c50040SAxel Rasmussen err = mfill_atomic_pte_zeropage(dst_pmd, 5573217d3c7SMike Rapoport dst_vma, dst_addr); 5583217d3c7SMike Rapoport } else { 55961c50040SAxel Rasmussen err = shmem_mfill_atomic_pte(dst_pmd, dst_vma, 56015313257SAxel Rasmussen dst_addr, src_addr, 561d7be6d7eSZhangPeng flags, foliop); 5623217d3c7SMike Rapoport } 5633217d3c7SMike Rapoport 5643217d3c7SMike Rapoport return err; 5653217d3c7SMike Rapoport } 5663217d3c7SMike Rapoport 567a734991cSAxel Rasmussen static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm, 568c1a4de99SAndrea Arcangeli unsigned long dst_start, 569c1a4de99SAndrea Arcangeli unsigned long src_start, 570c1a4de99SAndrea Arcangeli unsigned long len, 571a759a909SNadav Amit atomic_t *mmap_changing, 572d9712937SAxel Rasmussen uffd_flags_t flags) 573c1a4de99SAndrea Arcangeli { 574c1a4de99SAndrea Arcangeli struct vm_area_struct *dst_vma; 575c1a4de99SAndrea Arcangeli ssize_t err; 576c1a4de99SAndrea Arcangeli pmd_t *dst_pmd; 577c1a4de99SAndrea Arcangeli unsigned long src_addr, dst_addr; 578b6ebaedbSAndrea Arcangeli long copied; 579d7be6d7eSZhangPeng struct folio *folio; 580c1a4de99SAndrea Arcangeli 581c1a4de99SAndrea Arcangeli /* 582c1a4de99SAndrea Arcangeli * Sanitize the command parameters: 583c1a4de99SAndrea Arcangeli */ 584c1a4de99SAndrea Arcangeli BUG_ON(dst_start & ~PAGE_MASK); 585c1a4de99SAndrea Arcangeli BUG_ON(len & ~PAGE_MASK); 586c1a4de99SAndrea Arcangeli 587c1a4de99SAndrea Arcangeli /* Does the address range wrap, or is the span zero-sized? */ 588c1a4de99SAndrea Arcangeli BUG_ON(src_start + len <= src_start); 589c1a4de99SAndrea Arcangeli BUG_ON(dst_start + len <= dst_start); 590c1a4de99SAndrea Arcangeli 591b6ebaedbSAndrea Arcangeli src_addr = src_start; 592b6ebaedbSAndrea Arcangeli dst_addr = dst_start; 593b6ebaedbSAndrea Arcangeli copied = 0; 594d7be6d7eSZhangPeng folio = NULL; 595b6ebaedbSAndrea Arcangeli retry: 596d8ed45c5SMichel Lespinasse mmap_read_lock(dst_mm); 597c1a4de99SAndrea Arcangeli 598c1a4de99SAndrea Arcangeli /* 599df2cc96eSMike Rapoport * If memory mappings are changing because of non-cooperative 600df2cc96eSMike Rapoport * operation (e.g. mremap) running in parallel, bail out and 601df2cc96eSMike Rapoport * request the user to retry later 602df2cc96eSMike Rapoport */ 603df2cc96eSMike Rapoport err = -EAGAIN; 604a759a909SNadav Amit if (mmap_changing && atomic_read(mmap_changing)) 605df2cc96eSMike Rapoport goto out_unlock; 606df2cc96eSMike Rapoport 607df2cc96eSMike Rapoport /* 608c1a4de99SAndrea Arcangeli * Make sure the vma is not shared, that the dst range is 609c1a4de99SAndrea Arcangeli * both valid and fully within a single existing vma. 610c1a4de99SAndrea Arcangeli */ 61127d02568SMike Rapoport err = -ENOENT; 612643aa36eSWei Yang dst_vma = find_dst_vma(dst_mm, dst_start, len); 61326071cedSMike Rapoport if (!dst_vma) 61426071cedSMike Rapoport goto out_unlock; 61527d02568SMike Rapoport 61627d02568SMike Rapoport err = -EINVAL; 61727d02568SMike Rapoport /* 61827d02568SMike Rapoport * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but 61927d02568SMike Rapoport * it will overwrite vm_ops, so vma_is_anonymous must return false. 
62027d02568SMike Rapoport */ 62127d02568SMike Rapoport if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && 62227d02568SMike Rapoport dst_vma->vm_flags & VM_SHARED)) 62327d02568SMike Rapoport goto out_unlock; 62427d02568SMike Rapoport 62527d02568SMike Rapoport /* 62672981e0eSAndrea Arcangeli * validate 'mode' now that we know the dst_vma: don't allow 62772981e0eSAndrea Arcangeli * a wrprotect copy if the userfaultfd didn't register as WP. 62872981e0eSAndrea Arcangeli */ 629d9712937SAxel Rasmussen if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP)) 63072981e0eSAndrea Arcangeli goto out_unlock; 63172981e0eSAndrea Arcangeli 63272981e0eSAndrea Arcangeli /* 63327d02568SMike Rapoport * If this is a HUGETLB vma, pass off to appropriate routine 63427d02568SMike Rapoport */ 63527d02568SMike Rapoport if (is_vm_hugetlb_page(dst_vma)) 63667695f18SLokesh Gidra return mfill_atomic_hugetlb(dst_vma, dst_start, src_start, 63767695f18SLokesh Gidra len, mmap_changing, flags); 63827d02568SMike Rapoport 63926071cedSMike Rapoport if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) 640b6ebaedbSAndrea Arcangeli goto out_unlock; 641d9712937SAxel Rasmussen if (!vma_is_shmem(dst_vma) && 642d9712937SAxel Rasmussen uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) 643f6191471SAxel Rasmussen goto out_unlock; 644c1a4de99SAndrea Arcangeli 645c1a4de99SAndrea Arcangeli /* 646c1a4de99SAndrea Arcangeli * Ensure the dst_vma has a anon_vma or this page 647c1a4de99SAndrea Arcangeli * would get a NULL anon_vma when moved in the 648c1a4de99SAndrea Arcangeli * dst_vma. 649c1a4de99SAndrea Arcangeli */ 650c1a4de99SAndrea Arcangeli err = -ENOMEM; 6515b51072eSAndrea Arcangeli if (!(dst_vma->vm_flags & VM_SHARED) && 6525b51072eSAndrea Arcangeli unlikely(anon_vma_prepare(dst_vma))) 653b6ebaedbSAndrea Arcangeli goto out_unlock; 654c1a4de99SAndrea Arcangeli 655b6ebaedbSAndrea Arcangeli while (src_addr < src_start + len) { 656c1a4de99SAndrea Arcangeli pmd_t dst_pmdval; 657b6ebaedbSAndrea Arcangeli 658c1a4de99SAndrea Arcangeli BUG_ON(dst_addr >= dst_start + len); 659b6ebaedbSAndrea Arcangeli 660c1a4de99SAndrea Arcangeli dst_pmd = mm_alloc_pmd(dst_mm, dst_addr); 661c1a4de99SAndrea Arcangeli if (unlikely(!dst_pmd)) { 662c1a4de99SAndrea Arcangeli err = -ENOMEM; 663c1a4de99SAndrea Arcangeli break; 664c1a4de99SAndrea Arcangeli } 665c1a4de99SAndrea Arcangeli 666dab6e717SPeter Zijlstra dst_pmdval = pmdp_get_lockless(dst_pmd); 667c1a4de99SAndrea Arcangeli /* 668c1a4de99SAndrea Arcangeli * If the dst_pmd is mapped as THP don't 669c1a4de99SAndrea Arcangeli * override it and just be strict. 
670c1a4de99SAndrea Arcangeli */ 671c1a4de99SAndrea Arcangeli if (unlikely(pmd_trans_huge(dst_pmdval))) { 672c1a4de99SAndrea Arcangeli err = -EEXIST; 673c1a4de99SAndrea Arcangeli break; 674c1a4de99SAndrea Arcangeli } 675c1a4de99SAndrea Arcangeli if (unlikely(pmd_none(dst_pmdval)) && 6764cf58924SJoel Fernandes (Google) unlikely(__pte_alloc(dst_mm, dst_pmd))) { 677c1a4de99SAndrea Arcangeli err = -ENOMEM; 678c1a4de99SAndrea Arcangeli break; 679c1a4de99SAndrea Arcangeli } 680c1a4de99SAndrea Arcangeli /* If an huge pmd materialized from under us fail */ 681c1a4de99SAndrea Arcangeli if (unlikely(pmd_trans_huge(*dst_pmd))) { 682c1a4de99SAndrea Arcangeli err = -EFAULT; 683c1a4de99SAndrea Arcangeli break; 684c1a4de99SAndrea Arcangeli } 685c1a4de99SAndrea Arcangeli 686c1a4de99SAndrea Arcangeli BUG_ON(pmd_none(*dst_pmd)); 687c1a4de99SAndrea Arcangeli BUG_ON(pmd_trans_huge(*dst_pmd)); 688c1a4de99SAndrea Arcangeli 68961c50040SAxel Rasmussen err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, 690d7be6d7eSZhangPeng src_addr, flags, &folio); 691c1a4de99SAndrea Arcangeli cond_resched(); 692c1a4de99SAndrea Arcangeli 6939e368259SAndrea Arcangeli if (unlikely(err == -ENOENT)) { 694d7be6d7eSZhangPeng void *kaddr; 695b6ebaedbSAndrea Arcangeli 696d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm); 697d7be6d7eSZhangPeng BUG_ON(!folio); 698b6ebaedbSAndrea Arcangeli 699d7be6d7eSZhangPeng kaddr = kmap_local_folio(folio, 0); 700d7be6d7eSZhangPeng err = copy_from_user(kaddr, 701b6ebaedbSAndrea Arcangeli (const void __user *) src_addr, 702b6ebaedbSAndrea Arcangeli PAGE_SIZE); 703d7be6d7eSZhangPeng kunmap_local(kaddr); 704b6ebaedbSAndrea Arcangeli if (unlikely(err)) { 705b6ebaedbSAndrea Arcangeli err = -EFAULT; 706b6ebaedbSAndrea Arcangeli goto out; 707b6ebaedbSAndrea Arcangeli } 708d7be6d7eSZhangPeng flush_dcache_folio(folio); 709b6ebaedbSAndrea Arcangeli goto retry; 710b6ebaedbSAndrea Arcangeli } else 711d7be6d7eSZhangPeng BUG_ON(folio); 712b6ebaedbSAndrea Arcangeli 713c1a4de99SAndrea Arcangeli if (!err) { 714c1a4de99SAndrea Arcangeli dst_addr += PAGE_SIZE; 715c1a4de99SAndrea Arcangeli src_addr += PAGE_SIZE; 716c1a4de99SAndrea Arcangeli copied += PAGE_SIZE; 717c1a4de99SAndrea Arcangeli 718c1a4de99SAndrea Arcangeli if (fatal_signal_pending(current)) 719c1a4de99SAndrea Arcangeli err = -EINTR; 720c1a4de99SAndrea Arcangeli } 721c1a4de99SAndrea Arcangeli if (err) 722c1a4de99SAndrea Arcangeli break; 723c1a4de99SAndrea Arcangeli } 724c1a4de99SAndrea Arcangeli 725b6ebaedbSAndrea Arcangeli out_unlock: 726d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm); 727b6ebaedbSAndrea Arcangeli out: 728d7be6d7eSZhangPeng if (folio) 729d7be6d7eSZhangPeng folio_put(folio); 730c1a4de99SAndrea Arcangeli BUG_ON(copied < 0); 731c1a4de99SAndrea Arcangeli BUG_ON(err > 0); 732c1a4de99SAndrea Arcangeli BUG_ON(!copied && !err); 733c1a4de99SAndrea Arcangeli return copied ? 
copied : err; 734c1a4de99SAndrea Arcangeli } 735c1a4de99SAndrea Arcangeli 736a734991cSAxel Rasmussen ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start, 737df2cc96eSMike Rapoport unsigned long src_start, unsigned long len, 738d9712937SAxel Rasmussen atomic_t *mmap_changing, uffd_flags_t flags) 739c1a4de99SAndrea Arcangeli { 740d9712937SAxel Rasmussen return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing, 741d9712937SAxel Rasmussen uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY)); 742c1a4de99SAndrea Arcangeli } 743c1a4de99SAndrea Arcangeli 744a734991cSAxel Rasmussen ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start, 745a759a909SNadav Amit unsigned long len, atomic_t *mmap_changing) 746c1a4de99SAndrea Arcangeli { 747d9712937SAxel Rasmussen return mfill_atomic(dst_mm, start, 0, len, mmap_changing, 748d9712937SAxel Rasmussen uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE)); 749f6191471SAxel Rasmussen } 750f6191471SAxel Rasmussen 751a734991cSAxel Rasmussen ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start, 75202891844SAxel Rasmussen unsigned long len, atomic_t *mmap_changing, 75302891844SAxel Rasmussen uffd_flags_t flags) 754f6191471SAxel Rasmussen { 755d9712937SAxel Rasmussen return mfill_atomic(dst_mm, start, 0, len, mmap_changing, 75602891844SAxel Rasmussen uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE)); 757c1a4de99SAndrea Arcangeli } 758ffd05793SShaohua Li 759fc71884aSAxel Rasmussen ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start, 760fc71884aSAxel Rasmussen unsigned long len, atomic_t *mmap_changing, 761fc71884aSAxel Rasmussen uffd_flags_t flags) 762fc71884aSAxel Rasmussen { 763fc71884aSAxel Rasmussen return mfill_atomic(dst_mm, start, 0, len, mmap_changing, 764fc71884aSAxel Rasmussen uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON)); 765fc71884aSAxel Rasmussen } 766fc71884aSAxel Rasmussen 76761c50040SAxel Rasmussen long uffd_wp_range(struct vm_area_struct *dst_vma, 768f369b07cSPeter Xu unsigned long start, unsigned long len, bool enable_wp) 769f369b07cSPeter Xu { 770931298e1SDavid Hildenbrand unsigned int mm_cp_flags; 771f369b07cSPeter Xu struct mmu_gather tlb; 772d1751118SPeter Xu long ret; 773f369b07cSPeter Xu 774a1b92a3fSMuhammad Usama Anjum VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end, 775a1b92a3fSMuhammad Usama Anjum "The address range exceeds VMA boundary.\n"); 776f369b07cSPeter Xu if (enable_wp) 777931298e1SDavid Hildenbrand mm_cp_flags = MM_CP_UFFD_WP; 778f369b07cSPeter Xu else 779931298e1SDavid Hildenbrand mm_cp_flags = MM_CP_UFFD_WP_RESOLVE; 780f369b07cSPeter Xu 781931298e1SDavid Hildenbrand /* 782931298e1SDavid Hildenbrand * vma->vm_page_prot already reflects that uffd-wp is enabled for this 783931298e1SDavid Hildenbrand * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed 784931298e1SDavid Hildenbrand * to be write-protected as default whenever protection changes. 785931298e1SDavid Hildenbrand * Try upgrading write permissions manually. 
786931298e1SDavid Hildenbrand */ 787931298e1SDavid Hildenbrand if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma)) 788931298e1SDavid Hildenbrand mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE; 78961c50040SAxel Rasmussen tlb_gather_mmu(&tlb, dst_vma->vm_mm); 790d1751118SPeter Xu ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags); 791f369b07cSPeter Xu tlb_finish_mmu(&tlb); 792d1751118SPeter Xu 793d1751118SPeter Xu return ret; 794f369b07cSPeter Xu } 795f369b07cSPeter Xu 796ffd05793SShaohua Li int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, 797a759a909SNadav Amit unsigned long len, bool enable_wp, 798a759a909SNadav Amit atomic_t *mmap_changing) 799ffd05793SShaohua Li { 800a1b92a3fSMuhammad Usama Anjum unsigned long end = start + len; 801a1b92a3fSMuhammad Usama Anjum unsigned long _start, _end; 802ffd05793SShaohua Li struct vm_area_struct *dst_vma; 8035a90d5a1SPeter Xu unsigned long page_mask; 804d1751118SPeter Xu long err; 805a1b92a3fSMuhammad Usama Anjum VMA_ITERATOR(vmi, dst_mm, start); 806ffd05793SShaohua Li 807ffd05793SShaohua Li /* 808ffd05793SShaohua Li * Sanitize the command parameters: 809ffd05793SShaohua Li */ 810ffd05793SShaohua Li BUG_ON(start & ~PAGE_MASK); 811ffd05793SShaohua Li BUG_ON(len & ~PAGE_MASK); 812ffd05793SShaohua Li 813ffd05793SShaohua Li /* Does the address range wrap, or is the span zero-sized? */ 814ffd05793SShaohua Li BUG_ON(start + len <= start); 815ffd05793SShaohua Li 816d8ed45c5SMichel Lespinasse mmap_read_lock(dst_mm); 817ffd05793SShaohua Li 818ffd05793SShaohua Li /* 819ffd05793SShaohua Li * If memory mappings are changing because of non-cooperative 820ffd05793SShaohua Li * operation (e.g. mremap) running in parallel, bail out and 821ffd05793SShaohua Li * request the user to retry later 822ffd05793SShaohua Li */ 823ffd05793SShaohua Li err = -EAGAIN; 824a759a909SNadav Amit if (mmap_changing && atomic_read(mmap_changing)) 825ffd05793SShaohua Li goto out_unlock; 826ffd05793SShaohua Li 827ffd05793SShaohua Li err = -ENOENT; 828a1b92a3fSMuhammad Usama Anjum for_each_vma_range(vmi, dst_vma, end) { 829b1f9e876SPeter Xu 830a1b92a3fSMuhammad Usama Anjum if (!userfaultfd_wp(dst_vma)) { 831a1b92a3fSMuhammad Usama Anjum err = -ENOENT; 832a1b92a3fSMuhammad Usama Anjum break; 833a1b92a3fSMuhammad Usama Anjum } 834ffd05793SShaohua Li 8355a90d5a1SPeter Xu if (is_vm_hugetlb_page(dst_vma)) { 8365a90d5a1SPeter Xu err = -EINVAL; 8375a90d5a1SPeter Xu page_mask = vma_kernel_pagesize(dst_vma) - 1; 8385a90d5a1SPeter Xu if ((start & page_mask) || (len & page_mask)) 839a1b92a3fSMuhammad Usama Anjum break; 8405a90d5a1SPeter Xu } 8415a90d5a1SPeter Xu 842a1b92a3fSMuhammad Usama Anjum _start = max(dst_vma->vm_start, start); 843a1b92a3fSMuhammad Usama Anjum _end = min(dst_vma->vm_end, end); 844a1b92a3fSMuhammad Usama Anjum 84561c50040SAxel Rasmussen err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp); 846ffd05793SShaohua Li 847d1751118SPeter Xu /* Return 0 on success, <0 on failures */ 848a1b92a3fSMuhammad Usama Anjum if (err < 0) 849a1b92a3fSMuhammad Usama Anjum break; 850ffd05793SShaohua Li err = 0; 851a1b92a3fSMuhammad Usama Anjum } 852ffd05793SShaohua Li out_unlock: 853d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm); 854ffd05793SShaohua Li return err; 855ffd05793SShaohua Li } 856adef4406SAndrea Arcangeli 857adef4406SAndrea Arcangeli 858adef4406SAndrea Arcangeli void double_pt_lock(spinlock_t *ptl1, 859adef4406SAndrea Arcangeli spinlock_t *ptl2) 860adef4406SAndrea Arcangeli __acquires(ptl1) 861adef4406SAndrea Arcangeli 
__acquires(ptl2) 862adef4406SAndrea Arcangeli { 863adef4406SAndrea Arcangeli spinlock_t *ptl_tmp; 864adef4406SAndrea Arcangeli 865adef4406SAndrea Arcangeli if (ptl1 > ptl2) { 866adef4406SAndrea Arcangeli /* exchange ptl1 and ptl2 */ 867adef4406SAndrea Arcangeli ptl_tmp = ptl1; 868adef4406SAndrea Arcangeli ptl1 = ptl2; 869adef4406SAndrea Arcangeli ptl2 = ptl_tmp; 870adef4406SAndrea Arcangeli } 871adef4406SAndrea Arcangeli /* lock in virtual address order to avoid lock inversion */ 872adef4406SAndrea Arcangeli spin_lock(ptl1); 873adef4406SAndrea Arcangeli if (ptl1 != ptl2) 874adef4406SAndrea Arcangeli spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING); 875adef4406SAndrea Arcangeli else 876adef4406SAndrea Arcangeli __acquire(ptl2); 877adef4406SAndrea Arcangeli } 878adef4406SAndrea Arcangeli 879adef4406SAndrea Arcangeli void double_pt_unlock(spinlock_t *ptl1, 880adef4406SAndrea Arcangeli spinlock_t *ptl2) 881adef4406SAndrea Arcangeli __releases(ptl1) 882adef4406SAndrea Arcangeli __releases(ptl2) 883adef4406SAndrea Arcangeli { 884adef4406SAndrea Arcangeli spin_unlock(ptl1); 885adef4406SAndrea Arcangeli if (ptl1 != ptl2) 886adef4406SAndrea Arcangeli spin_unlock(ptl2); 887adef4406SAndrea Arcangeli else 888adef4406SAndrea Arcangeli __release(ptl2); 889adef4406SAndrea Arcangeli } 890adef4406SAndrea Arcangeli 891adef4406SAndrea Arcangeli 892adef4406SAndrea Arcangeli static int move_present_pte(struct mm_struct *mm, 893adef4406SAndrea Arcangeli struct vm_area_struct *dst_vma, 894adef4406SAndrea Arcangeli struct vm_area_struct *src_vma, 895adef4406SAndrea Arcangeli unsigned long dst_addr, unsigned long src_addr, 896adef4406SAndrea Arcangeli pte_t *dst_pte, pte_t *src_pte, 897adef4406SAndrea Arcangeli pte_t orig_dst_pte, pte_t orig_src_pte, 898adef4406SAndrea Arcangeli spinlock_t *dst_ptl, spinlock_t *src_ptl, 899adef4406SAndrea Arcangeli struct folio *src_folio) 900adef4406SAndrea Arcangeli { 901adef4406SAndrea Arcangeli int err = 0; 902adef4406SAndrea Arcangeli 903adef4406SAndrea Arcangeli double_pt_lock(dst_ptl, src_ptl); 904adef4406SAndrea Arcangeli 905*56ae10cfSRyan Roberts if (!pte_same(ptep_get(src_pte), orig_src_pte) || 906*56ae10cfSRyan Roberts !pte_same(ptep_get(dst_pte), orig_dst_pte)) { 907adef4406SAndrea Arcangeli err = -EAGAIN; 908adef4406SAndrea Arcangeli goto out; 909adef4406SAndrea Arcangeli } 910adef4406SAndrea Arcangeli if (folio_test_large(src_folio) || 911adef4406SAndrea Arcangeli folio_maybe_dma_pinned(src_folio) || 912adef4406SAndrea Arcangeli !PageAnonExclusive(&src_folio->page)) { 913adef4406SAndrea Arcangeli err = -EBUSY; 914adef4406SAndrea Arcangeli goto out; 915adef4406SAndrea Arcangeli } 916adef4406SAndrea Arcangeli 917adef4406SAndrea Arcangeli folio_move_anon_rmap(src_folio, dst_vma); 918adef4406SAndrea Arcangeli WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr)); 919adef4406SAndrea Arcangeli 920adef4406SAndrea Arcangeli orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte); 921adef4406SAndrea Arcangeli /* Folio got pinned from under us. Put it back and fail the move. 
*/
922adef4406SAndrea Arcangeli if (folio_maybe_dma_pinned(src_folio)) {
923adef4406SAndrea Arcangeli set_pte_at(mm, src_addr, src_pte, orig_src_pte);
924adef4406SAndrea Arcangeli err = -EBUSY;
925adef4406SAndrea Arcangeli goto out;
926adef4406SAndrea Arcangeli }
927adef4406SAndrea Arcangeli 
928adef4406SAndrea Arcangeli orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
929adef4406SAndrea Arcangeli /* Follow mremap() behavior and treat the entry dirty after the move */
930adef4406SAndrea Arcangeli orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
931adef4406SAndrea Arcangeli 
932adef4406SAndrea Arcangeli set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
933adef4406SAndrea Arcangeli out:
934adef4406SAndrea Arcangeli double_pt_unlock(dst_ptl, src_ptl);
935adef4406SAndrea Arcangeli return err;
936adef4406SAndrea Arcangeli }
937adef4406SAndrea Arcangeli 
938adef4406SAndrea Arcangeli static int move_swap_pte(struct mm_struct *mm,
939adef4406SAndrea Arcangeli unsigned long dst_addr, unsigned long src_addr,
940adef4406SAndrea Arcangeli pte_t *dst_pte, pte_t *src_pte,
941adef4406SAndrea Arcangeli pte_t orig_dst_pte, pte_t orig_src_pte,
942adef4406SAndrea Arcangeli spinlock_t *dst_ptl, spinlock_t *src_ptl)
943adef4406SAndrea Arcangeli {
944adef4406SAndrea Arcangeli if (!pte_swp_exclusive(orig_src_pte))
945adef4406SAndrea Arcangeli return -EBUSY;
946adef4406SAndrea Arcangeli 
947adef4406SAndrea Arcangeli double_pt_lock(dst_ptl, src_ptl);
948adef4406SAndrea Arcangeli 
949*56ae10cfSRyan Roberts if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
950*56ae10cfSRyan Roberts !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
951adef4406SAndrea Arcangeli double_pt_unlock(dst_ptl, src_ptl);
952adef4406SAndrea Arcangeli return -EAGAIN;
953adef4406SAndrea Arcangeli }
954adef4406SAndrea Arcangeli 
955adef4406SAndrea Arcangeli orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
956adef4406SAndrea Arcangeli set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
957adef4406SAndrea Arcangeli double_pt_unlock(dst_ptl, src_ptl);
958adef4406SAndrea Arcangeli 
959adef4406SAndrea Arcangeli return 0;
960adef4406SAndrea Arcangeli }
961adef4406SAndrea Arcangeli 
962adef4406SAndrea Arcangeli /*
963adef4406SAndrea Arcangeli * The mmap_lock for reading is held by the caller. Just move the page
964adef4406SAndrea Arcangeli * from src_pmd to dst_pmd if possible, and return 0 if it succeeded
965adef4406SAndrea Arcangeli * in moving the page.
966adef4406SAndrea Arcangeli */ 967adef4406SAndrea Arcangeli static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, 968adef4406SAndrea Arcangeli struct vm_area_struct *dst_vma, 969adef4406SAndrea Arcangeli struct vm_area_struct *src_vma, 970adef4406SAndrea Arcangeli unsigned long dst_addr, unsigned long src_addr, 971adef4406SAndrea Arcangeli __u64 mode) 972adef4406SAndrea Arcangeli { 973adef4406SAndrea Arcangeli swp_entry_t entry; 974adef4406SAndrea Arcangeli pte_t orig_src_pte, orig_dst_pte; 975adef4406SAndrea Arcangeli pte_t src_folio_pte; 976adef4406SAndrea Arcangeli spinlock_t *src_ptl, *dst_ptl; 977adef4406SAndrea Arcangeli pte_t *src_pte = NULL; 978adef4406SAndrea Arcangeli pte_t *dst_pte = NULL; 979adef4406SAndrea Arcangeli 980adef4406SAndrea Arcangeli struct folio *src_folio = NULL; 981adef4406SAndrea Arcangeli struct anon_vma *src_anon_vma = NULL; 982adef4406SAndrea Arcangeli struct mmu_notifier_range range; 983adef4406SAndrea Arcangeli int err = 0; 984adef4406SAndrea Arcangeli 985adef4406SAndrea Arcangeli flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE); 986adef4406SAndrea Arcangeli mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 987adef4406SAndrea Arcangeli src_addr, src_addr + PAGE_SIZE); 988adef4406SAndrea Arcangeli mmu_notifier_invalidate_range_start(&range); 989adef4406SAndrea Arcangeli retry: 990adef4406SAndrea Arcangeli dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl); 991adef4406SAndrea Arcangeli 992adef4406SAndrea Arcangeli /* Retry if a huge pmd materialized from under us */ 993adef4406SAndrea Arcangeli if (unlikely(!dst_pte)) { 994adef4406SAndrea Arcangeli err = -EAGAIN; 995adef4406SAndrea Arcangeli goto out; 996adef4406SAndrea Arcangeli } 997adef4406SAndrea Arcangeli 998adef4406SAndrea Arcangeli src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl); 999adef4406SAndrea Arcangeli 1000adef4406SAndrea Arcangeli /* 1001adef4406SAndrea Arcangeli * We held the mmap_lock for reading so MADV_DONTNEED 1002adef4406SAndrea Arcangeli * can zap transparent huge pages under us, or the 1003adef4406SAndrea Arcangeli * transparent huge page fault can establish new 1004adef4406SAndrea Arcangeli * transparent huge pages under us. 
1005adef4406SAndrea Arcangeli */
1006adef4406SAndrea Arcangeli if (unlikely(!src_pte)) {
1007adef4406SAndrea Arcangeli err = -EAGAIN;
1008adef4406SAndrea Arcangeli goto out;
1009adef4406SAndrea Arcangeli }
1010adef4406SAndrea Arcangeli 
1011adef4406SAndrea Arcangeli /* Sanity checks before the operation */
1012adef4406SAndrea Arcangeli if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
1013adef4406SAndrea Arcangeli WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
1014adef4406SAndrea Arcangeli err = -EINVAL;
1015adef4406SAndrea Arcangeli goto out;
1016adef4406SAndrea Arcangeli }
1017adef4406SAndrea Arcangeli 
1018adef4406SAndrea Arcangeli spin_lock(dst_ptl);
1019*56ae10cfSRyan Roberts orig_dst_pte = ptep_get(dst_pte);
1020adef4406SAndrea Arcangeli spin_unlock(dst_ptl);
1021adef4406SAndrea Arcangeli if (!pte_none(orig_dst_pte)) {
1022adef4406SAndrea Arcangeli err = -EEXIST;
1023adef4406SAndrea Arcangeli goto out;
1024adef4406SAndrea Arcangeli }
1025adef4406SAndrea Arcangeli 
1026adef4406SAndrea Arcangeli spin_lock(src_ptl);
1027*56ae10cfSRyan Roberts orig_src_pte = ptep_get(src_pte);
1028adef4406SAndrea Arcangeli spin_unlock(src_ptl);
1029adef4406SAndrea Arcangeli if (pte_none(orig_src_pte)) {
1030adef4406SAndrea Arcangeli if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
1031adef4406SAndrea Arcangeli err = -ENOENT;
1032adef4406SAndrea Arcangeli else /* nothing to do to move a hole */
1033adef4406SAndrea Arcangeli err = 0;
1034adef4406SAndrea Arcangeli goto out;
1035adef4406SAndrea Arcangeli }
1036adef4406SAndrea Arcangeli 
1037adef4406SAndrea Arcangeli /* If the PTE changed after we locked the folio then start over */
1038adef4406SAndrea Arcangeli if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
1039adef4406SAndrea Arcangeli err = -EAGAIN;
1040adef4406SAndrea Arcangeli goto out;
1041adef4406SAndrea Arcangeli }
1042adef4406SAndrea Arcangeli 
1043adef4406SAndrea Arcangeli if (pte_present(orig_src_pte)) {
1044adef4406SAndrea Arcangeli /*
1045adef4406SAndrea Arcangeli * Pin and lock both source folio and anon_vma. Since we are in
1046adef4406SAndrea Arcangeli * RCU read section, we can't block, so on contention have to
1047adef4406SAndrea Arcangeli * unmap the ptes, obtain the lock and retry.
1048adef4406SAndrea Arcangeli */ 1049adef4406SAndrea Arcangeli if (!src_folio) { 1050adef4406SAndrea Arcangeli struct folio *folio; 1051adef4406SAndrea Arcangeli 1052adef4406SAndrea Arcangeli /* 1053adef4406SAndrea Arcangeli * Pin the page while holding the lock to be sure the 1054adef4406SAndrea Arcangeli * page isn't freed under us 1055adef4406SAndrea Arcangeli */ 1056adef4406SAndrea Arcangeli spin_lock(src_ptl); 1057*56ae10cfSRyan Roberts if (!pte_same(orig_src_pte, ptep_get(src_pte))) { 1058adef4406SAndrea Arcangeli spin_unlock(src_ptl); 1059adef4406SAndrea Arcangeli err = -EAGAIN; 1060adef4406SAndrea Arcangeli goto out; 1061adef4406SAndrea Arcangeli } 1062adef4406SAndrea Arcangeli 1063adef4406SAndrea Arcangeli folio = vm_normal_folio(src_vma, src_addr, orig_src_pte); 1064adef4406SAndrea Arcangeli if (!folio || !PageAnonExclusive(&folio->page)) { 1065adef4406SAndrea Arcangeli spin_unlock(src_ptl); 1066adef4406SAndrea Arcangeli err = -EBUSY; 1067adef4406SAndrea Arcangeli goto out; 1068adef4406SAndrea Arcangeli } 1069adef4406SAndrea Arcangeli 1070adef4406SAndrea Arcangeli folio_get(folio); 1071adef4406SAndrea Arcangeli src_folio = folio; 1072adef4406SAndrea Arcangeli src_folio_pte = orig_src_pte; 1073adef4406SAndrea Arcangeli spin_unlock(src_ptl); 1074adef4406SAndrea Arcangeli 1075adef4406SAndrea Arcangeli if (!folio_trylock(src_folio)) { 1076adef4406SAndrea Arcangeli pte_unmap(&orig_src_pte); 1077adef4406SAndrea Arcangeli pte_unmap(&orig_dst_pte); 1078adef4406SAndrea Arcangeli src_pte = dst_pte = NULL; 1079adef4406SAndrea Arcangeli /* now we can block and wait */ 1080adef4406SAndrea Arcangeli folio_lock(src_folio); 1081adef4406SAndrea Arcangeli goto retry; 1082adef4406SAndrea Arcangeli } 1083adef4406SAndrea Arcangeli 1084adef4406SAndrea Arcangeli if (WARN_ON_ONCE(!folio_test_anon(src_folio))) { 1085adef4406SAndrea Arcangeli err = -EBUSY; 1086adef4406SAndrea Arcangeli goto out; 1087adef4406SAndrea Arcangeli } 1088adef4406SAndrea Arcangeli } 1089adef4406SAndrea Arcangeli 1090adef4406SAndrea Arcangeli /* at this point we have src_folio locked */ 1091adef4406SAndrea Arcangeli if (folio_test_large(src_folio)) { 1092982ae058SSuren Baghdasaryan /* split_folio() can block */ 1093982ae058SSuren Baghdasaryan pte_unmap(&orig_src_pte); 1094982ae058SSuren Baghdasaryan pte_unmap(&orig_dst_pte); 1095982ae058SSuren Baghdasaryan src_pte = dst_pte = NULL; 1096adef4406SAndrea Arcangeli err = split_folio(src_folio); 1097adef4406SAndrea Arcangeli if (err) 1098adef4406SAndrea Arcangeli goto out; 1099982ae058SSuren Baghdasaryan /* have to reacquire the folio after it got split */ 1100982ae058SSuren Baghdasaryan folio_unlock(src_folio); 1101982ae058SSuren Baghdasaryan folio_put(src_folio); 1102982ae058SSuren Baghdasaryan src_folio = NULL; 1103982ae058SSuren Baghdasaryan goto retry; 1104adef4406SAndrea Arcangeli } 1105adef4406SAndrea Arcangeli 1106adef4406SAndrea Arcangeli if (!src_anon_vma) { 1107adef4406SAndrea Arcangeli /* 1108adef4406SAndrea Arcangeli * folio_referenced walks the anon_vma chain 1109adef4406SAndrea Arcangeli * without the folio lock. Serialize against it with 1110adef4406SAndrea Arcangeli * the anon_vma lock, the folio lock is not enough. 
1111adef4406SAndrea Arcangeli */ 1112adef4406SAndrea Arcangeli src_anon_vma = folio_get_anon_vma(src_folio); 1113adef4406SAndrea Arcangeli if (!src_anon_vma) { 1114adef4406SAndrea Arcangeli /* page was unmapped from under us */ 1115adef4406SAndrea Arcangeli err = -EAGAIN; 1116adef4406SAndrea Arcangeli goto out; 1117adef4406SAndrea Arcangeli } 1118adef4406SAndrea Arcangeli if (!anon_vma_trylock_write(src_anon_vma)) { 1119adef4406SAndrea Arcangeli pte_unmap(&orig_src_pte); 1120adef4406SAndrea Arcangeli pte_unmap(&orig_dst_pte); 1121adef4406SAndrea Arcangeli src_pte = dst_pte = NULL; 1122adef4406SAndrea Arcangeli /* now we can block and wait */ 1123adef4406SAndrea Arcangeli anon_vma_lock_write(src_anon_vma); 1124adef4406SAndrea Arcangeli goto retry; 1125adef4406SAndrea Arcangeli } 1126adef4406SAndrea Arcangeli } 1127adef4406SAndrea Arcangeli 1128adef4406SAndrea Arcangeli err = move_present_pte(mm, dst_vma, src_vma, 1129adef4406SAndrea Arcangeli dst_addr, src_addr, dst_pte, src_pte, 1130adef4406SAndrea Arcangeli orig_dst_pte, orig_src_pte, 1131adef4406SAndrea Arcangeli dst_ptl, src_ptl, src_folio); 1132adef4406SAndrea Arcangeli } else { 1133adef4406SAndrea Arcangeli entry = pte_to_swp_entry(orig_src_pte); 1134adef4406SAndrea Arcangeli if (non_swap_entry(entry)) { 1135adef4406SAndrea Arcangeli if (is_migration_entry(entry)) { 1136adef4406SAndrea Arcangeli pte_unmap(&orig_src_pte); 1137adef4406SAndrea Arcangeli pte_unmap(&orig_dst_pte); 1138adef4406SAndrea Arcangeli src_pte = dst_pte = NULL; 1139adef4406SAndrea Arcangeli migration_entry_wait(mm, src_pmd, src_addr); 1140adef4406SAndrea Arcangeli err = -EAGAIN; 1141adef4406SAndrea Arcangeli } else 1142adef4406SAndrea Arcangeli err = -EFAULT; 1143adef4406SAndrea Arcangeli goto out; 1144adef4406SAndrea Arcangeli } 1145adef4406SAndrea Arcangeli 1146adef4406SAndrea Arcangeli err = move_swap_pte(mm, dst_addr, src_addr, 1147adef4406SAndrea Arcangeli dst_pte, src_pte, 1148adef4406SAndrea Arcangeli orig_dst_pte, orig_src_pte, 1149adef4406SAndrea Arcangeli dst_ptl, src_ptl); 1150adef4406SAndrea Arcangeli } 1151adef4406SAndrea Arcangeli 1152adef4406SAndrea Arcangeli out: 1153adef4406SAndrea Arcangeli if (src_anon_vma) { 1154adef4406SAndrea Arcangeli anon_vma_unlock_write(src_anon_vma); 1155adef4406SAndrea Arcangeli put_anon_vma(src_anon_vma); 1156adef4406SAndrea Arcangeli } 1157adef4406SAndrea Arcangeli if (src_folio) { 1158adef4406SAndrea Arcangeli folio_unlock(src_folio); 1159adef4406SAndrea Arcangeli folio_put(src_folio); 1160adef4406SAndrea Arcangeli } 1161adef4406SAndrea Arcangeli if (dst_pte) 1162adef4406SAndrea Arcangeli pte_unmap(dst_pte); 1163adef4406SAndrea Arcangeli if (src_pte) 1164adef4406SAndrea Arcangeli pte_unmap(src_pte); 1165adef4406SAndrea Arcangeli mmu_notifier_invalidate_range_end(&range); 1166adef4406SAndrea Arcangeli 1167adef4406SAndrea Arcangeli return err; 1168adef4406SAndrea Arcangeli } 1169adef4406SAndrea Arcangeli 1170adef4406SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1171adef4406SAndrea Arcangeli static inline bool move_splits_huge_pmd(unsigned long dst_addr, 1172adef4406SAndrea Arcangeli unsigned long src_addr, 1173adef4406SAndrea Arcangeli unsigned long src_end) 1174adef4406SAndrea Arcangeli { 1175adef4406SAndrea Arcangeli return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) || 1176adef4406SAndrea Arcangeli src_end - src_addr < HPAGE_PMD_SIZE; 1177adef4406SAndrea Arcangeli } 1178adef4406SAndrea Arcangeli #else 1179adef4406SAndrea Arcangeli static inline bool move_splits_huge_pmd(unsigned long dst_addr, 
1180adef4406SAndrea Arcangeli unsigned long src_addr,
1181adef4406SAndrea Arcangeli unsigned long src_end)
1182adef4406SAndrea Arcangeli {
1183adef4406SAndrea Arcangeli /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
1184adef4406SAndrea Arcangeli return false;
1185adef4406SAndrea Arcangeli }
1186adef4406SAndrea Arcangeli #endif
1187adef4406SAndrea Arcangeli 
1188adef4406SAndrea Arcangeli static inline bool vma_move_compatible(struct vm_area_struct *vma)
1189adef4406SAndrea Arcangeli {
1190adef4406SAndrea Arcangeli return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
1191adef4406SAndrea Arcangeli VM_MIXEDMAP | VM_SHADOW_STACK));
1192adef4406SAndrea Arcangeli }
1193adef4406SAndrea Arcangeli 
1194adef4406SAndrea Arcangeli static int validate_move_areas(struct userfaultfd_ctx *ctx,
1195adef4406SAndrea Arcangeli struct vm_area_struct *src_vma,
1196adef4406SAndrea Arcangeli struct vm_area_struct *dst_vma)
1197adef4406SAndrea Arcangeli {
1198adef4406SAndrea Arcangeli /* Only allow moving if both have the same access and protection */
1199adef4406SAndrea Arcangeli if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
1200adef4406SAndrea Arcangeli pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
1201adef4406SAndrea Arcangeli return -EINVAL;
1202adef4406SAndrea Arcangeli 
1203adef4406SAndrea Arcangeli /* Only allow moving if both are mlocked or both aren't */
1204adef4406SAndrea Arcangeli if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
1205adef4406SAndrea Arcangeli return -EINVAL;
1206adef4406SAndrea Arcangeli 
1207adef4406SAndrea Arcangeli /*
1208adef4406SAndrea Arcangeli * For now, we keep it simple and only move between writable VMAs.
1209adef4406SAndrea Arcangeli * Access flags are equal, therefore checking only the source is enough.
1210adef4406SAndrea Arcangeli */
1211adef4406SAndrea Arcangeli if (!(src_vma->vm_flags & VM_WRITE))
1212adef4406SAndrea Arcangeli return -EINVAL;
1213adef4406SAndrea Arcangeli 
1214adef4406SAndrea Arcangeli /* Check if vma flags indicate content which can be moved */
1215adef4406SAndrea Arcangeli if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
1216adef4406SAndrea Arcangeli return -EINVAL;
1217adef4406SAndrea Arcangeli 
1218adef4406SAndrea Arcangeli /* Ensure dst_vma is registered in the uffd we are operating on */
1219adef4406SAndrea Arcangeli if (!dst_vma->vm_userfaultfd_ctx.ctx ||
1220adef4406SAndrea Arcangeli dst_vma->vm_userfaultfd_ctx.ctx != ctx)
1221adef4406SAndrea Arcangeli return -EINVAL;
1222adef4406SAndrea Arcangeli 
1223adef4406SAndrea Arcangeli /* Only allow moving across anonymous vmas */
1224adef4406SAndrea Arcangeli if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
1225adef4406SAndrea Arcangeli return -EINVAL;
1226adef4406SAndrea Arcangeli 
1227adef4406SAndrea Arcangeli /*
1228adef4406SAndrea Arcangeli * Ensure the dst_vma has an anon_vma or this page
1229adef4406SAndrea Arcangeli * would get a NULL anon_vma when moved in the
1230adef4406SAndrea Arcangeli * dst_vma.
1231adef4406SAndrea Arcangeli */
1232adef4406SAndrea Arcangeli if (unlikely(anon_vma_prepare(dst_vma)))
1233adef4406SAndrea Arcangeli return -ENOMEM;
1234adef4406SAndrea Arcangeli 
1235adef4406SAndrea Arcangeli return 0;
1236adef4406SAndrea Arcangeli }
1237adef4406SAndrea Arcangeli 
1238adef4406SAndrea Arcangeli /**
1239adef4406SAndrea Arcangeli * move_pages - move arbitrary anonymous pages of an existing vma
1240adef4406SAndrea Arcangeli * @ctx: pointer to the userfaultfd context
1241adef4406SAndrea Arcangeli * @mm: the address space to move pages
1242adef4406SAndrea Arcangeli * @dst_start: start of the destination virtual memory range
1243adef4406SAndrea Arcangeli * @src_start: start of the source virtual memory range
1244adef4406SAndrea Arcangeli * @len: length of the virtual memory range
1245adef4406SAndrea Arcangeli * @mode: flags from uffdio_move.mode
1246adef4406SAndrea Arcangeli *
1247adef4406SAndrea Arcangeli * Must be called with mmap_lock held for read.
1248adef4406SAndrea Arcangeli *
1249adef4406SAndrea Arcangeli * move_pages() remaps arbitrary anonymous pages atomically and with zero
1250adef4406SAndrea Arcangeli * copy. It only works on non shared anonymous pages because those can
1251adef4406SAndrea Arcangeli * be relocated without generating non linear anon_vmas in the rmap
1252adef4406SAndrea Arcangeli * code.
1253adef4406SAndrea Arcangeli *
1254adef4406SAndrea Arcangeli * It provides a zero copy mechanism to handle userspace page faults.
1255adef4406SAndrea Arcangeli * The source vma pages should have mapcount == 1, which can be
1256adef4406SAndrea Arcangeli * enforced by using madvise(MADV_DONTFORK) on src vma.
1257adef4406SAndrea Arcangeli *
1258adef4406SAndrea Arcangeli * The thread receiving the page during the userland page fault
1259adef4406SAndrea Arcangeli * will receive the faulting page in the source vma through the network,
1260adef4406SAndrea Arcangeli * storage or any other I/O device (MADV_DONTFORK in the source vma
1261adef4406SAndrea Arcangeli * prevents move_pages() from failing with -EBUSY if the process forks before
1262adef4406SAndrea Arcangeli * move_pages() is called), then it will call move_pages() to map the
1263adef4406SAndrea Arcangeli * page in the faulting address in the destination vma.
1264adef4406SAndrea Arcangeli *
1265adef4406SAndrea Arcangeli * This userfaultfd command works purely via pagetables, so it's the
1266adef4406SAndrea Arcangeli * most efficient way to move physical non shared anonymous pages
1267adef4406SAndrea Arcangeli * across different virtual addresses. Unlike mremap()/mmap()/munmap()
1268adef4406SAndrea Arcangeli * it does not create any new vmas. The mapping in the destination
1269adef4406SAndrea Arcangeli * address is atomic.
1270adef4406SAndrea Arcangeli *
1271adef4406SAndrea Arcangeli * It only works if the vma protection bits are identical between the
1272adef4406SAndrea Arcangeli * source and destination vma.
1273adef4406SAndrea Arcangeli *
1274adef4406SAndrea Arcangeli * It can remap non shared anonymous pages within the same vma too.
1275adef4406SAndrea Arcangeli *
1276adef4406SAndrea Arcangeli * If the source virtual memory range has any unmapped holes, or if
1277adef4406SAndrea Arcangeli * the destination virtual memory range is not a whole unmapped hole,
1278adef4406SAndrea Arcangeli * move_pages() will fail respectively with -ENOENT or -EEXIST. This
1279adef4406SAndrea Arcangeli * provides a very strict behavior to avoid any chance of memory
1280adef4406SAndrea Arcangeli * corruption going unnoticed if there are userland race conditions.
1281adef4406SAndrea Arcangeli * Only one thread should resolve the userland page fault at any given
1282adef4406SAndrea Arcangeli * time for any given faulting address. This means that if two threads
1283adef4406SAndrea Arcangeli * try to both call move_pages() on the same destination address at the
1284adef4406SAndrea Arcangeli * same time, the second thread will get an explicit error from this
1285adef4406SAndrea Arcangeli * command.
1286adef4406SAndrea Arcangeli *
1287adef4406SAndrea Arcangeli * The command retval will be "len" if successful. The command
1288adef4406SAndrea Arcangeli * however can be interrupted by fatal signals or errors. If
1289adef4406SAndrea Arcangeli * interrupted it will return the number of bytes successfully
1290adef4406SAndrea Arcangeli * remapped before the interruption if any, or the negative error if
1291adef4406SAndrea Arcangeli * none. It will never return zero. Either it will return an error or
1292adef4406SAndrea Arcangeli * an amount of bytes successfully moved. If the retval reports a
1293adef4406SAndrea Arcangeli * "short" remap, the move_pages() command should be repeated by
1294adef4406SAndrea Arcangeli * userland with src+retval, dst+retval, len-retval if it wants to know
1295adef4406SAndrea Arcangeli * about the error that interrupted it.
1296adef4406SAndrea Arcangeli *
1297adef4406SAndrea Arcangeli * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
1298adef4406SAndrea Arcangeli * prevent -ENOENT errors from materializing if there are holes in the
1299adef4406SAndrea Arcangeli * source virtual range that is being remapped. The holes will be
1300adef4406SAndrea Arcangeli * accounted as successfully remapped in the retval of the
1301adef4406SAndrea Arcangeli * command. This is mostly useful to remap naturally hugepage-aligned
1302adef4406SAndrea Arcangeli * virtual regions without knowing whether there are transparent hugepages
1303adef4406SAndrea Arcangeli * in the regions or not, while avoiding the risk of having to split
1304adef4406SAndrea Arcangeli * the huge pmd during the remap.
1305adef4406SAndrea Arcangeli *
1306adef4406SAndrea Arcangeli * Any rmap walk that takes the anon_vma locks without
1307adef4406SAndrea Arcangeli * first obtaining the folio lock (the only current instance is
1308adef4406SAndrea Arcangeli * folio_referenced) will have to verify if the folio->mapping
1309adef4406SAndrea Arcangeli * has changed after taking the anon_vma lock. If it changed, the walk
1310adef4406SAndrea Arcangeli * should release the lock and retry obtaining a new anon_vma, because
1311adef4406SAndrea Arcangeli * it means the anon_vma was changed by move_pages() before the lock
1312adef4406SAndrea Arcangeli * could be obtained. This is the only additional complexity added to
1313adef4406SAndrea Arcangeli * the rmap code to provide this anonymous page remapping functionality.
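*
* As an illustrative userspace sketch only (this is not part of the kernel
* sources; the field names follow the uffdio_move layout in the userfaultfd
* UAPI, and it assumes a "short" move reports the bytes already moved back
* in uffdio_move.move, the same way UFFDIO_COPY reports progress in
* uffdio_copy.copy), the retry protocol described above could look roughly
* like this:
*
*	struct uffdio_move move = {
*		.dst = dst, .src = src, .len = len, .mode = 0,
*	};
*
*	for (;;) {
*		if (ioctl(uffd, UFFDIO_MOVE, &move) == 0)
*			break;			// everything was moved
*		if (move.move <= 0)
*			break;			// hard error: see move.move / errno
*		// "short" move: retry the not-yet-moved remainder
*		move.dst += move.move;
*		move.src += move.move;
*		move.len -= move.move;
*		move.move = 0;
*	}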
1314adef4406SAndrea Arcangeli */
1315adef4406SAndrea Arcangeli ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
1316adef4406SAndrea Arcangeli unsigned long dst_start, unsigned long src_start,
1317adef4406SAndrea Arcangeli unsigned long len, __u64 mode)
1318adef4406SAndrea Arcangeli {
1319adef4406SAndrea Arcangeli struct vm_area_struct *src_vma, *dst_vma;
1320adef4406SAndrea Arcangeli unsigned long src_addr, dst_addr;
1321adef4406SAndrea Arcangeli pmd_t *src_pmd, *dst_pmd;
1322adef4406SAndrea Arcangeli long err = -EINVAL;
1323adef4406SAndrea Arcangeli ssize_t moved = 0;
1324adef4406SAndrea Arcangeli 
1325adef4406SAndrea Arcangeli /* Sanitize the command parameters. */
1326adef4406SAndrea Arcangeli if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
1327adef4406SAndrea Arcangeli WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
1328adef4406SAndrea Arcangeli WARN_ON_ONCE(len & ~PAGE_MASK))
1329adef4406SAndrea Arcangeli goto out;
1330adef4406SAndrea Arcangeli 
1331adef4406SAndrea Arcangeli /* Does the address range wrap, or is the span zero-sized? */
1332adef4406SAndrea Arcangeli if (WARN_ON_ONCE(src_start + len <= src_start) ||
1333adef4406SAndrea Arcangeli WARN_ON_ONCE(dst_start + len <= dst_start))
1334adef4406SAndrea Arcangeli goto out;
1335adef4406SAndrea Arcangeli 
1336adef4406SAndrea Arcangeli /*
1337adef4406SAndrea Arcangeli * Make sure the vma is not shared, that the src and dst remap
1338adef4406SAndrea Arcangeli * ranges are both valid and fully within a single existing
1339adef4406SAndrea Arcangeli * vma.
1340adef4406SAndrea Arcangeli */
1341adef4406SAndrea Arcangeli src_vma = find_vma(mm, src_start);
1342adef4406SAndrea Arcangeli if (!src_vma || (src_vma->vm_flags & VM_SHARED))
1343adef4406SAndrea Arcangeli goto out;
1344adef4406SAndrea Arcangeli if (src_start < src_vma->vm_start ||
1345adef4406SAndrea Arcangeli src_start + len > src_vma->vm_end)
1346adef4406SAndrea Arcangeli goto out;
1347adef4406SAndrea Arcangeli 
1348adef4406SAndrea Arcangeli dst_vma = find_vma(mm, dst_start);
1349adef4406SAndrea Arcangeli if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
1350adef4406SAndrea Arcangeli goto out;
1351adef4406SAndrea Arcangeli if (dst_start < dst_vma->vm_start ||
1352adef4406SAndrea Arcangeli dst_start + len > dst_vma->vm_end)
1353adef4406SAndrea Arcangeli goto out;
1354adef4406SAndrea Arcangeli 
1355adef4406SAndrea Arcangeli err = validate_move_areas(ctx, src_vma, dst_vma);
1356adef4406SAndrea Arcangeli if (err)
1357adef4406SAndrea Arcangeli goto out;
1358adef4406SAndrea Arcangeli 
1359adef4406SAndrea Arcangeli for (src_addr = src_start, dst_addr = dst_start;
1360adef4406SAndrea Arcangeli src_addr < src_start + len;) {
1361adef4406SAndrea Arcangeli spinlock_t *ptl;
1362adef4406SAndrea Arcangeli pmd_t dst_pmdval;
1363adef4406SAndrea Arcangeli unsigned long step_size;
1364adef4406SAndrea Arcangeli 
1365adef4406SAndrea Arcangeli /*
1366adef4406SAndrea Arcangeli * Below works because an anonymous area would not have a
1367adef4406SAndrea Arcangeli * transparent huge PUD. If file-backed support is added,
1368adef4406SAndrea Arcangeli * that case would need to be handled here.
1369adef4406SAndrea Arcangeli */
1370adef4406SAndrea Arcangeli src_pmd = mm_find_pmd(mm, src_addr);
1371adef4406SAndrea Arcangeli if (unlikely(!src_pmd)) {
1372adef4406SAndrea Arcangeli if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1373adef4406SAndrea Arcangeli err = -ENOENT;
1374adef4406SAndrea Arcangeli break;
1375adef4406SAndrea Arcangeli }
1376adef4406SAndrea Arcangeli src_pmd = mm_alloc_pmd(mm, src_addr);
1377adef4406SAndrea Arcangeli if (unlikely(!src_pmd)) {
1378adef4406SAndrea Arcangeli err = -ENOMEM;
1379adef4406SAndrea Arcangeli break;
1380adef4406SAndrea Arcangeli }
1381adef4406SAndrea Arcangeli }
1382adef4406SAndrea Arcangeli dst_pmd = mm_alloc_pmd(mm, dst_addr);
1383adef4406SAndrea Arcangeli if (unlikely(!dst_pmd)) {
1384adef4406SAndrea Arcangeli err = -ENOMEM;
1385adef4406SAndrea Arcangeli break;
1386adef4406SAndrea Arcangeli }
1387adef4406SAndrea Arcangeli 
1388adef4406SAndrea Arcangeli dst_pmdval = pmdp_get_lockless(dst_pmd);
1389adef4406SAndrea Arcangeli /*
1390adef4406SAndrea Arcangeli * If the dst_pmd is mapped as THP, don't override it and just
1391adef4406SAndrea Arcangeli * be strict. If dst_pmd changes into THP after this check,
1392adef4406SAndrea Arcangeli * move_pages_huge_pmd() will detect the change and retry
1393adef4406SAndrea Arcangeli * while move_pages_pte() will detect the change and fail.
1394adef4406SAndrea Arcangeli */
1395adef4406SAndrea Arcangeli if (unlikely(pmd_trans_huge(dst_pmdval))) {
1396adef4406SAndrea Arcangeli err = -EEXIST;
1397adef4406SAndrea Arcangeli break;
1398adef4406SAndrea Arcangeli }
1399adef4406SAndrea Arcangeli 
1400adef4406SAndrea Arcangeli ptl = pmd_trans_huge_lock(src_pmd, src_vma);
1401adef4406SAndrea Arcangeli if (ptl) {
1402adef4406SAndrea Arcangeli if (pmd_devmap(*src_pmd)) {
1403adef4406SAndrea Arcangeli spin_unlock(ptl);
1404adef4406SAndrea Arcangeli err = -ENOENT;
1405adef4406SAndrea Arcangeli break;
1406adef4406SAndrea Arcangeli }
14075d4747a6SSuren Baghdasaryan /* Avoid moving zeropages for now */
14085d4747a6SSuren Baghdasaryan if (is_huge_zero_pmd(*src_pmd)) {
14095d4747a6SSuren Baghdasaryan spin_unlock(ptl);
14105d4747a6SSuren Baghdasaryan err = -EBUSY;
14115d4747a6SSuren Baghdasaryan break;
14125d4747a6SSuren Baghdasaryan }
1413adef4406SAndrea Arcangeli 
1414adef4406SAndrea Arcangeli /* Check if we can move the pmd without splitting it.
*/ 1415adef4406SAndrea Arcangeli if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) || 1416adef4406SAndrea Arcangeli !pmd_none(dst_pmdval)) { 1417adef4406SAndrea Arcangeli struct folio *folio = pfn_folio(pmd_pfn(*src_pmd)); 1418adef4406SAndrea Arcangeli 1419adef4406SAndrea Arcangeli if (!folio || !PageAnonExclusive(&folio->page)) { 1420adef4406SAndrea Arcangeli spin_unlock(ptl); 1421adef4406SAndrea Arcangeli err = -EBUSY; 1422adef4406SAndrea Arcangeli break; 1423adef4406SAndrea Arcangeli } 1424adef4406SAndrea Arcangeli 1425adef4406SAndrea Arcangeli spin_unlock(ptl); 1426adef4406SAndrea Arcangeli split_huge_pmd(src_vma, src_pmd, src_addr); 1427adef4406SAndrea Arcangeli /* The folio will be split by move_pages_pte() */ 1428adef4406SAndrea Arcangeli continue; 1429adef4406SAndrea Arcangeli } 1430adef4406SAndrea Arcangeli 1431adef4406SAndrea Arcangeli err = move_pages_huge_pmd(mm, dst_pmd, src_pmd, 1432adef4406SAndrea Arcangeli dst_pmdval, dst_vma, src_vma, 1433adef4406SAndrea Arcangeli dst_addr, src_addr); 1434adef4406SAndrea Arcangeli step_size = HPAGE_PMD_SIZE; 1435adef4406SAndrea Arcangeli } else { 1436adef4406SAndrea Arcangeli if (pmd_none(*src_pmd)) { 1437adef4406SAndrea Arcangeli if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) { 1438adef4406SAndrea Arcangeli err = -ENOENT; 1439adef4406SAndrea Arcangeli break; 1440adef4406SAndrea Arcangeli } 1441adef4406SAndrea Arcangeli if (unlikely(__pte_alloc(mm, src_pmd))) { 1442adef4406SAndrea Arcangeli err = -ENOMEM; 1443adef4406SAndrea Arcangeli break; 1444adef4406SAndrea Arcangeli } 1445adef4406SAndrea Arcangeli } 1446adef4406SAndrea Arcangeli 1447adef4406SAndrea Arcangeli if (unlikely(pte_alloc(mm, dst_pmd))) { 1448adef4406SAndrea Arcangeli err = -ENOMEM; 1449adef4406SAndrea Arcangeli break; 1450adef4406SAndrea Arcangeli } 1451adef4406SAndrea Arcangeli 1452adef4406SAndrea Arcangeli err = move_pages_pte(mm, dst_pmd, src_pmd, 1453adef4406SAndrea Arcangeli dst_vma, src_vma, 1454adef4406SAndrea Arcangeli dst_addr, src_addr, mode); 1455adef4406SAndrea Arcangeli step_size = PAGE_SIZE; 1456adef4406SAndrea Arcangeli } 1457adef4406SAndrea Arcangeli 1458adef4406SAndrea Arcangeli cond_resched(); 1459adef4406SAndrea Arcangeli 1460adef4406SAndrea Arcangeli if (fatal_signal_pending(current)) { 1461adef4406SAndrea Arcangeli /* Do not override an error */ 1462adef4406SAndrea Arcangeli if (!err || err == -EAGAIN) 1463adef4406SAndrea Arcangeli err = -EINTR; 1464adef4406SAndrea Arcangeli break; 1465adef4406SAndrea Arcangeli } 1466adef4406SAndrea Arcangeli 1467adef4406SAndrea Arcangeli if (err) { 1468adef4406SAndrea Arcangeli if (err == -EAGAIN) 1469adef4406SAndrea Arcangeli continue; 1470adef4406SAndrea Arcangeli break; 1471adef4406SAndrea Arcangeli } 1472adef4406SAndrea Arcangeli 1473adef4406SAndrea Arcangeli /* Proceed to the next page */ 1474adef4406SAndrea Arcangeli dst_addr += step_size; 1475adef4406SAndrea Arcangeli src_addr += step_size; 1476adef4406SAndrea Arcangeli moved += step_size; 1477adef4406SAndrea Arcangeli } 1478adef4406SAndrea Arcangeli 1479adef4406SAndrea Arcangeli out: 1480adef4406SAndrea Arcangeli VM_WARN_ON(moved < 0); 1481adef4406SAndrea Arcangeli VM_WARN_ON(err > 0); 1482adef4406SAndrea Arcangeli VM_WARN_ON(!moved && !err); 1483adef4406SAndrea Arcangeli return moved ? moved : err; 1484adef4406SAndrea Arcangeli } 1485