xref: /linux/mm/userfaultfd.c (revision adef440691bab824e39c1b17382322d195e1fab0)
120c8ccb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c1a4de99SAndrea Arcangeli /*
3c1a4de99SAndrea Arcangeli  *  mm/userfaultfd.c
4c1a4de99SAndrea Arcangeli  *
5c1a4de99SAndrea Arcangeli  *  Copyright (C) 2015  Red Hat, Inc.
6c1a4de99SAndrea Arcangeli  */
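
/*
 * For orientation, a minimal and purely illustrative userspace sketch of how
 * the copy path implemented below is reached.  The addr, len, fault_addr and
 * src_buf names are placeholders, not part of this file:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API };
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)addr, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *	// After reading a fault event from uffd, resolve it with a copy;
 *	// the UFFDIO_COPY ioctl ends up in mfill_atomic_copy() below.
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(PAGE_SIZE - 1),
 *		.src  = (unsigned long)src_buf,
 *		.len  = PAGE_SIZE,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */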
7c1a4de99SAndrea Arcangeli 
8c1a4de99SAndrea Arcangeli #include <linux/mm.h>
9174cd4b1SIngo Molnar #include <linux/sched/signal.h>
10c1a4de99SAndrea Arcangeli #include <linux/pagemap.h>
11c1a4de99SAndrea Arcangeli #include <linux/rmap.h>
12c1a4de99SAndrea Arcangeli #include <linux/swap.h>
13c1a4de99SAndrea Arcangeli #include <linux/swapops.h>
14c1a4de99SAndrea Arcangeli #include <linux/userfaultfd_k.h>
15c1a4de99SAndrea Arcangeli #include <linux/mmu_notifier.h>
1660d4d2d2SMike Kravetz #include <linux/hugetlb.h>
1726071cedSMike Rapoport #include <linux/shmem_fs.h>
18c1a4de99SAndrea Arcangeli #include <asm/tlbflush.h>
194a18419fSNadav Amit #include <asm/tlb.h>
20c1a4de99SAndrea Arcangeli #include "internal.h"
21c1a4de99SAndrea Arcangeli 
22643aa36eSWei Yang static __always_inline
23643aa36eSWei Yang struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
24643aa36eSWei Yang 				    unsigned long dst_start,
25643aa36eSWei Yang 				    unsigned long len)
26643aa36eSWei Yang {
27643aa36eSWei Yang 	/*
28643aa36eSWei Yang 	 * Make sure that the dst range is both valid and fully within a
29643aa36eSWei Yang 	 * single existing vma.
30643aa36eSWei Yang 	 */
31643aa36eSWei Yang 	struct vm_area_struct *dst_vma;
32643aa36eSWei Yang 
33643aa36eSWei Yang 	dst_vma = find_vma(dst_mm, dst_start);
34686ea6e6SZhangPeng 	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
35643aa36eSWei Yang 		return NULL;
36643aa36eSWei Yang 
37643aa36eSWei Yang 	/*
38643aa36eSWei Yang 	 * Check the vma is registered in uffd, this is required to
39643aa36eSWei Yang 	 * Check that the vma is registered in uffd; this is required to
40643aa36eSWei Yang 	 * enforce the VM_MAYWRITE check done at uffd registration
41643aa36eSWei Yang 	 */
42643aa36eSWei Yang 	if (!dst_vma->vm_userfaultfd_ctx.ctx)
43643aa36eSWei Yang 		return NULL;
44643aa36eSWei Yang 
45643aa36eSWei Yang 	return dst_vma;
46643aa36eSWei Yang }
47643aa36eSWei Yang 
48435cdb41SAxel Rasmussen /* Check if dst_addr is beyond the end of the file. Must be called with ptl held. */
49435cdb41SAxel Rasmussen static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
50435cdb41SAxel Rasmussen 				 unsigned long dst_addr)
51435cdb41SAxel Rasmussen {
52435cdb41SAxel Rasmussen 	struct inode *inode;
53435cdb41SAxel Rasmussen 	pgoff_t offset, max_off;
54435cdb41SAxel Rasmussen 
55435cdb41SAxel Rasmussen 	if (!dst_vma->vm_file)
56435cdb41SAxel Rasmussen 		return false;
57435cdb41SAxel Rasmussen 
58435cdb41SAxel Rasmussen 	inode = dst_vma->vm_file->f_inode;
59435cdb41SAxel Rasmussen 	offset = linear_page_index(dst_vma, dst_addr);
60435cdb41SAxel Rasmussen 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
61435cdb41SAxel Rasmussen 	return offset >= max_off;
62435cdb41SAxel Rasmussen }
63435cdb41SAxel Rasmussen 
6415313257SAxel Rasmussen /*
6515313257SAxel Rasmussen  * Install PTEs to map dst_addr (within dst_vma) to page.
6615313257SAxel Rasmussen  *
677d64ae3aSAxel Rasmussen  * This function handles both MFILL_ATOMIC_COPY and MFILL_ATOMIC_CONTINUE for
687d64ae3aSAxel Rasmussen  * both shmem and anon, and for both shared and private VMAs.
6915313257SAxel Rasmussen  */
7061c50040SAxel Rasmussen int mfill_atomic_install_pte(pmd_t *dst_pmd,
7115313257SAxel Rasmussen 			     struct vm_area_struct *dst_vma,
7215313257SAxel Rasmussen 			     unsigned long dst_addr, struct page *page,
73d9712937SAxel Rasmussen 			     bool newly_allocated, uffd_flags_t flags)
7415313257SAxel Rasmussen {
7515313257SAxel Rasmussen 	int ret;
7661c50040SAxel Rasmussen 	struct mm_struct *dst_mm = dst_vma->vm_mm;
7715313257SAxel Rasmussen 	pte_t _dst_pte, *dst_pte;
7815313257SAxel Rasmussen 	bool writable = dst_vma->vm_flags & VM_WRITE;
7915313257SAxel Rasmussen 	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
8093b0d917SPeter Xu 	bool page_in_cache = page_mapping(page);
8115313257SAxel Rasmussen 	spinlock_t *ptl;
8228965f0fSVishal Moola (Oracle) 	struct folio *folio;
8315313257SAxel Rasmussen 
8415313257SAxel Rasmussen 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
859ae0f87dSPeter Xu 	_dst_pte = pte_mkdirty(_dst_pte);
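	/*
	 * A page-cache page mapped into a private VMA must stay read-only
	 * here, so that a later write fault goes through copy-on-write
	 * instead of modifying the shared page directly.
	 */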
8615313257SAxel Rasmussen 	if (page_in_cache && !vm_shared)
8715313257SAxel Rasmussen 		writable = false;
888ee79edfSPeter Xu 	if (writable)
89161e393cSRick Edgecombe 		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
90d9712937SAxel Rasmussen 	if (flags & MFILL_ATOMIC_WP)
91f1eb1bacSPeter Xu 		_dst_pte = pte_mkuffd_wp(_dst_pte);
9215313257SAxel Rasmussen 
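	/*
	 * pte_offset_map_lock() can fail if the pmd is no longer a valid
	 * page table pointer (e.g. it was concurrently collapsed into a huge
	 * pmd or cleared); return -EAGAIN so the operation can be retried.
	 */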
933622d3cdSHugh Dickins 	ret = -EAGAIN;
9415313257SAxel Rasmussen 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
953622d3cdSHugh Dickins 	if (!dst_pte)
963622d3cdSHugh Dickins 		goto out;
9715313257SAxel Rasmussen 
98435cdb41SAxel Rasmussen 	if (mfill_file_over_size(dst_vma, dst_addr)) {
9915313257SAxel Rasmussen 		ret = -EFAULT;
10015313257SAxel Rasmussen 		goto out_unlock;
10115313257SAxel Rasmussen 	}
10215313257SAxel Rasmussen 
10315313257SAxel Rasmussen 	ret = -EEXIST;
1048ee79edfSPeter Xu 	/*
1058ee79edfSPeter Xu 	 * We allow overwriting a pte marker: consider the case where both
1068ee79edfSPeter Xu 	 * MISSING and WP are registered; we first wr-protect a none pte,
1078ee79edfSPeter Xu 	 * which has no page cache page backing it, and then access the page.
1088ee79edfSPeter Xu 	 */
109c33c7948SRyan Roberts 	if (!pte_none_mostly(ptep_get(dst_pte)))
11015313257SAxel Rasmussen 		goto out_unlock;
11115313257SAxel Rasmussen 
11228965f0fSVishal Moola (Oracle) 	folio = page_folio(page);
113cea86fe2SHugh Dickins 	if (page_in_cache) {
114cea86fe2SHugh Dickins 		/* Usually, cache pages are already added to LRU */
115cea86fe2SHugh Dickins 		if (newly_allocated)
11628965f0fSVishal Moola (Oracle) 			folio_add_lru(folio);
117cea86fe2SHugh Dickins 		page_add_file_rmap(page, dst_vma, false);
118cea86fe2SHugh Dickins 	} else {
11940f2bbf7SDavid Hildenbrand 		page_add_new_anon_rmap(page, dst_vma, dst_addr);
12028965f0fSVishal Moola (Oracle) 		folio_add_lru_vma(folio, dst_vma);
121cea86fe2SHugh Dickins 	}
12215313257SAxel Rasmussen 
12315313257SAxel Rasmussen 	/*
12415313257SAxel Rasmussen 	 * Must happen after rmap, as mm_counter() checks mapping (via
12515313257SAxel Rasmussen 	 * PageAnon()), which is set by __page_set_anon_rmap().
12615313257SAxel Rasmussen 	 */
12715313257SAxel Rasmussen 	inc_mm_counter(dst_mm, mm_counter(page));
12815313257SAxel Rasmussen 
12915313257SAxel Rasmussen 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
13015313257SAxel Rasmussen 
13115313257SAxel Rasmussen 	/* No need to invalidate - it was non-present before */
13215313257SAxel Rasmussen 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
13315313257SAxel Rasmussen 	ret = 0;
13415313257SAxel Rasmussen out_unlock:
13515313257SAxel Rasmussen 	pte_unmap_unlock(dst_pte, ptl);
1363622d3cdSHugh Dickins out:
13715313257SAxel Rasmussen 	return ret;
13815313257SAxel Rasmussen }
13915313257SAxel Rasmussen 
14061c50040SAxel Rasmussen static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
141c1a4de99SAndrea Arcangeli 				 struct vm_area_struct *dst_vma,
142c1a4de99SAndrea Arcangeli 				 unsigned long dst_addr,
143b6ebaedbSAndrea Arcangeli 				 unsigned long src_addr,
144d9712937SAxel Rasmussen 				 uffd_flags_t flags,
145d7be6d7eSZhangPeng 				 struct folio **foliop)
146c1a4de99SAndrea Arcangeli {
14707e6d409SZhangPeng 	void *kaddr;
148c1a4de99SAndrea Arcangeli 	int ret;
14907e6d409SZhangPeng 	struct folio *folio;
150c1a4de99SAndrea Arcangeli 
151d7be6d7eSZhangPeng 	if (!*foliop) {
152c1a4de99SAndrea Arcangeli 		ret = -ENOMEM;
15307e6d409SZhangPeng 		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
15407e6d409SZhangPeng 					dst_addr, false);
15507e6d409SZhangPeng 		if (!folio)
156c1a4de99SAndrea Arcangeli 			goto out;
157c1a4de99SAndrea Arcangeli 
15807e6d409SZhangPeng 		kaddr = kmap_local_folio(folio, 0);
1595521de7dSIra Weiny 		/*
1605521de7dSIra Weiny 		 * The read mmap_lock is held here.  Although the mmap_lock
1615521de7dSIra Weiny 		 * can be taken for read recursively, a deadlock is still
1625521de7dSIra Weiny 		 * possible if a writer is waiting for the lock.  For example:
1635521de7dSIra Weiny 		 *
1645521de7dSIra Weiny 		 * process A thread 1 takes read lock on own mmap_lock
1655521de7dSIra Weiny 		 * process A thread 2 calls mmap, blocks taking write lock
1665521de7dSIra Weiny 		 * process B thread 1 takes page fault, read lock on own mmap lock
1675521de7dSIra Weiny 		 * process B thread 2 calls mmap, blocks taking write lock
1685521de7dSIra Weiny 		 * process A thread 1 blocks taking read lock on process B
1695521de7dSIra Weiny 		 * process B thread 1 blocks taking read lock on process A
1705521de7dSIra Weiny 		 *
1715521de7dSIra Weiny 		 * Disable page faults to prevent potential deadlock
1725521de7dSIra Weiny 		 * and retry the copy outside the mmap_lock.
1735521de7dSIra Weiny 		 */
1745521de7dSIra Weiny 		pagefault_disable();
17507e6d409SZhangPeng 		ret = copy_from_user(kaddr, (const void __user *) src_addr,
176b6ebaedbSAndrea Arcangeli 				     PAGE_SIZE);
1775521de7dSIra Weiny 		pagefault_enable();
17807e6d409SZhangPeng 		kunmap_local(kaddr);
179b6ebaedbSAndrea Arcangeli 
180c1e8d7c6SMichel Lespinasse 		/* fallback to copy_from_user outside mmap_lock */
181b6ebaedbSAndrea Arcangeli 		if (unlikely(ret)) {
1829e368259SAndrea Arcangeli 			ret = -ENOENT;
183d7be6d7eSZhangPeng 			*foliop = folio;
184b6ebaedbSAndrea Arcangeli 			/* don't free the page */
185b6ebaedbSAndrea Arcangeli 			goto out;
186b6ebaedbSAndrea Arcangeli 		}
1877c25a0b8SMuchun Song 
18807e6d409SZhangPeng 		flush_dcache_folio(folio);
189b6ebaedbSAndrea Arcangeli 	} else {
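		/*
		 * A folio was already allocated on a previous attempt: the
		 * in-atomic copy_from_user() above failed, and the caller
		 * filled the folio outside the mmap_lock before retrying.
		 */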
190d7be6d7eSZhangPeng 		folio = *foliop;
191d7be6d7eSZhangPeng 		*foliop = NULL;
192b6ebaedbSAndrea Arcangeli 	}
193c1a4de99SAndrea Arcangeli 
194c1a4de99SAndrea Arcangeli 	/*
19507e6d409SZhangPeng 	 * The memory barrier inside __folio_mark_uptodate makes sure that
196f4f5329dSWei Yang 	 * preceding stores to the page contents become visible before
197c1a4de99SAndrea Arcangeli 	 * the set_pte_at() write.
198c1a4de99SAndrea Arcangeli 	 */
19907e6d409SZhangPeng 	__folio_mark_uptodate(folio);
200c1a4de99SAndrea Arcangeli 
201c1a4de99SAndrea Arcangeli 	ret = -ENOMEM;
20207e6d409SZhangPeng 	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
203c1a4de99SAndrea Arcangeli 		goto out_release;
204c1a4de99SAndrea Arcangeli 
20561c50040SAxel Rasmussen 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
20607e6d409SZhangPeng 				       &folio->page, true, flags);
20715313257SAxel Rasmussen 	if (ret)
20815313257SAxel Rasmussen 		goto out_release;
209c1a4de99SAndrea Arcangeli out:
210c1a4de99SAndrea Arcangeli 	return ret;
211c1a4de99SAndrea Arcangeli out_release:
21207e6d409SZhangPeng 	folio_put(folio);
213c1a4de99SAndrea Arcangeli 	goto out;
214c1a4de99SAndrea Arcangeli }
215c1a4de99SAndrea Arcangeli 
21661c50040SAxel Rasmussen static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
217c1a4de99SAndrea Arcangeli 				     struct vm_area_struct *dst_vma,
218c1a4de99SAndrea Arcangeli 				     unsigned long dst_addr)
219c1a4de99SAndrea Arcangeli {
220c1a4de99SAndrea Arcangeli 	pte_t _dst_pte, *dst_pte;
221c1a4de99SAndrea Arcangeli 	spinlock_t *ptl;
222c1a4de99SAndrea Arcangeli 	int ret;
223c1a4de99SAndrea Arcangeli 
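	/*
	 * Map the shared zero page read-only as a special pte: no memory is
	 * allocated here, and a later write fault will CoW in a real page as
	 * usual.
	 */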
224c1a4de99SAndrea Arcangeli 	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
225c1a4de99SAndrea Arcangeli 					 dst_vma->vm_page_prot));
2263622d3cdSHugh Dickins 	ret = -EAGAIN;
22761c50040SAxel Rasmussen 	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
2283622d3cdSHugh Dickins 	if (!dst_pte)
2293622d3cdSHugh Dickins 		goto out;
230435cdb41SAxel Rasmussen 	if (mfill_file_over_size(dst_vma, dst_addr)) {
231e2a50c1fSAndrea Arcangeli 		ret = -EFAULT;
232e2a50c1fSAndrea Arcangeli 		goto out_unlock;
233e2a50c1fSAndrea Arcangeli 	}
234e2a50c1fSAndrea Arcangeli 	ret = -EEXIST;
235c33c7948SRyan Roberts 	if (!pte_none(ptep_get(dst_pte)))
236c1a4de99SAndrea Arcangeli 		goto out_unlock;
23761c50040SAxel Rasmussen 	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
238c1a4de99SAndrea Arcangeli 	/* No need to invalidate - it was non-present before */
239c1a4de99SAndrea Arcangeli 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
240c1a4de99SAndrea Arcangeli 	ret = 0;
241c1a4de99SAndrea Arcangeli out_unlock:
242c1a4de99SAndrea Arcangeli 	pte_unmap_unlock(dst_pte, ptl);
2433622d3cdSHugh Dickins out:
244c1a4de99SAndrea Arcangeli 	return ret;
245c1a4de99SAndrea Arcangeli }
246c1a4de99SAndrea Arcangeli 
24715313257SAxel Rasmussen /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
24861c50040SAxel Rasmussen static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
24915313257SAxel Rasmussen 				     struct vm_area_struct *dst_vma,
25015313257SAxel Rasmussen 				     unsigned long dst_addr,
251d9712937SAxel Rasmussen 				     uffd_flags_t flags)
25215313257SAxel Rasmussen {
25315313257SAxel Rasmussen 	struct inode *inode = file_inode(dst_vma->vm_file);
25415313257SAxel Rasmussen 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
25512acf4fbSMatthew Wilcox (Oracle) 	struct folio *folio;
25615313257SAxel Rasmussen 	struct page *page;
25715313257SAxel Rasmussen 	int ret;
25815313257SAxel Rasmussen 
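	/*
	 * UFFDIO_CONTINUE resolves a minor fault: the page is expected to
	 * already be present in the shmem page cache, so look it up without
	 * allocating anything (SGP_NOALLOC).
	 */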
25912acf4fbSMatthew Wilcox (Oracle) 	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
26012acf4fbSMatthew Wilcox (Oracle) 	/* Our caller expects us to return -EFAULT if we failed to find the folio */
26173f37dbcSAxel Rasmussen 	if (ret == -ENOENT)
26273f37dbcSAxel Rasmussen 		ret = -EFAULT;
26315313257SAxel Rasmussen 	if (ret)
26415313257SAxel Rasmussen 		goto out;
26512acf4fbSMatthew Wilcox (Oracle) 	if (!folio) {
26615313257SAxel Rasmussen 		ret = -EFAULT;
26715313257SAxel Rasmussen 		goto out;
26815313257SAxel Rasmussen 	}
26915313257SAxel Rasmussen 
27012acf4fbSMatthew Wilcox (Oracle) 	page = folio_file_page(folio, pgoff);
271a7605426SYang Shi 	if (PageHWPoison(page)) {
272a7605426SYang Shi 		ret = -EIO;
273a7605426SYang Shi 		goto out_release;
274a7605426SYang Shi 	}
275a7605426SYang Shi 
27661c50040SAxel Rasmussen 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
277d9712937SAxel Rasmussen 				       page, false, flags);
27815313257SAxel Rasmussen 	if (ret)
27915313257SAxel Rasmussen 		goto out_release;
28015313257SAxel Rasmussen 
28112acf4fbSMatthew Wilcox (Oracle) 	folio_unlock(folio);
28215313257SAxel Rasmussen 	ret = 0;
28315313257SAxel Rasmussen out:
28415313257SAxel Rasmussen 	return ret;
28515313257SAxel Rasmussen out_release:
28612acf4fbSMatthew Wilcox (Oracle) 	folio_unlock(folio);
28712acf4fbSMatthew Wilcox (Oracle) 	folio_put(folio);
28815313257SAxel Rasmussen 	goto out;
28915313257SAxel Rasmussen }
29015313257SAxel Rasmussen 
291fc71884aSAxel Rasmussen /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
292fc71884aSAxel Rasmussen static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
293fc71884aSAxel Rasmussen 				   struct vm_area_struct *dst_vma,
294fc71884aSAxel Rasmussen 				   unsigned long dst_addr,
295fc71884aSAxel Rasmussen 				   uffd_flags_t flags)
296fc71884aSAxel Rasmussen {
297fc71884aSAxel Rasmussen 	int ret;
298fc71884aSAxel Rasmussen 	struct mm_struct *dst_mm = dst_vma->vm_mm;
299fc71884aSAxel Rasmussen 	pte_t _dst_pte, *dst_pte;
300fc71884aSAxel Rasmussen 	spinlock_t *ptl;
301fc71884aSAxel Rasmussen 
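	/*
	 * Install a poison marker instead of a real page: any later access
	 * to this address will then fail as if the backing page were
	 * hardware-poisoned, without allocating any memory here.
	 */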
302fc71884aSAxel Rasmussen 	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
303597425dfSHugh Dickins 	ret = -EAGAIN;
304fc71884aSAxel Rasmussen 	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
305597425dfSHugh Dickins 	if (!dst_pte)
306597425dfSHugh Dickins 		goto out;
307fc71884aSAxel Rasmussen 
308fc71884aSAxel Rasmussen 	if (mfill_file_over_size(dst_vma, dst_addr)) {
309fc71884aSAxel Rasmussen 		ret = -EFAULT;
310fc71884aSAxel Rasmussen 		goto out_unlock;
311fc71884aSAxel Rasmussen 	}
312fc71884aSAxel Rasmussen 
313fc71884aSAxel Rasmussen 	ret = -EEXIST;
314fc71884aSAxel Rasmussen 	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
315afccb080SRyan Roberts 	if (!pte_none(ptep_get(dst_pte)))
316fc71884aSAxel Rasmussen 		goto out_unlock;
317fc71884aSAxel Rasmussen 
318fc71884aSAxel Rasmussen 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
319fc71884aSAxel Rasmussen 
320fc71884aSAxel Rasmussen 	/* No need to invalidate - it was non-present before */
321fc71884aSAxel Rasmussen 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
322fc71884aSAxel Rasmussen 	ret = 0;
323fc71884aSAxel Rasmussen out_unlock:
324fc71884aSAxel Rasmussen 	pte_unmap_unlock(dst_pte, ptl);
325597425dfSHugh Dickins out:
326fc71884aSAxel Rasmussen 	return ret;
327fc71884aSAxel Rasmussen }
328fc71884aSAxel Rasmussen 
329c1a4de99SAndrea Arcangeli static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
330c1a4de99SAndrea Arcangeli {
331c1a4de99SAndrea Arcangeli 	pgd_t *pgd;
332c2febafcSKirill A. Shutemov 	p4d_t *p4d;
333c1a4de99SAndrea Arcangeli 	pud_t *pud;
334c1a4de99SAndrea Arcangeli 
335c1a4de99SAndrea Arcangeli 	pgd = pgd_offset(mm, address);
336c2febafcSKirill A. Shutemov 	p4d = p4d_alloc(mm, pgd, address);
337c2febafcSKirill A. Shutemov 	if (!p4d)
338c2febafcSKirill A. Shutemov 		return NULL;
339c2febafcSKirill A. Shutemov 	pud = pud_alloc(mm, p4d, address);
340c2febafcSKirill A. Shutemov 	if (!pud)
341c2febafcSKirill A. Shutemov 		return NULL;
342c1a4de99SAndrea Arcangeli 	/*
343c1a4de99SAndrea Arcangeli 	 * Note that we don't necessarily get here because the pmd was
344c1a4de99SAndrea Arcangeli 	 * missing: *pmd may already be established, and in turn it may
345c1a4de99SAndrea Arcangeli 	 * also be a trans_huge_pmd.
346c1a4de99SAndrea Arcangeli 	 */
347c2febafcSKirill A. Shutemov 	return pmd_alloc(mm, pud, address);
348c1a4de99SAndrea Arcangeli }
349c1a4de99SAndrea Arcangeli 
35060d4d2d2SMike Kravetz #ifdef CONFIG_HUGETLB_PAGE
35160d4d2d2SMike Kravetz /*
352a734991cSAxel Rasmussen  * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
353c1e8d7c6SMichel Lespinasse  * called with the mmap_lock held; it will release the mmap_lock before returning.
35460d4d2d2SMike Kravetz  */
35561c50040SAxel Rasmussen static __always_inline ssize_t mfill_atomic_hugetlb(
35660d4d2d2SMike Kravetz 					      struct vm_area_struct *dst_vma,
35760d4d2d2SMike Kravetz 					      unsigned long dst_start,
35860d4d2d2SMike Kravetz 					      unsigned long src_start,
35960d4d2d2SMike Kravetz 					      unsigned long len,
360d9712937SAxel Rasmussen 					      uffd_flags_t flags)
36160d4d2d2SMike Kravetz {
36261c50040SAxel Rasmussen 	struct mm_struct *dst_mm = dst_vma->vm_mm;
3631c9e8defSMike Kravetz 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
36460d4d2d2SMike Kravetz 	ssize_t err;
36560d4d2d2SMike Kravetz 	pte_t *dst_pte;
36660d4d2d2SMike Kravetz 	unsigned long src_addr, dst_addr;
36760d4d2d2SMike Kravetz 	long copied;
3680169fd51SZhangPeng 	struct folio *folio;
36960d4d2d2SMike Kravetz 	unsigned long vma_hpagesize;
37060d4d2d2SMike Kravetz 	pgoff_t idx;
37160d4d2d2SMike Kravetz 	u32 hash;
37260d4d2d2SMike Kravetz 	struct address_space *mapping;
37360d4d2d2SMike Kravetz 
37460d4d2d2SMike Kravetz 	/*
37560d4d2d2SMike Kravetz 	 * There is no default zero huge page for all huge page sizes
37660d4d2d2SMike Kravetz 	 * supported by hugetlb.  A PMD_SIZE huge page may exist, as used
37760d4d2d2SMike Kravetz 	 * by THP.  Since we cannot reliably insert a zero page, this
37860d4d2d2SMike Kravetz 	 * feature is not supported.
37960d4d2d2SMike Kravetz 	 */
3808a13897fSAxel Rasmussen 	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
381d8ed45c5SMichel Lespinasse 		mmap_read_unlock(dst_mm);
38260d4d2d2SMike Kravetz 		return -EINVAL;
38360d4d2d2SMike Kravetz 	}
38460d4d2d2SMike Kravetz 
38560d4d2d2SMike Kravetz 	src_addr = src_start;
38660d4d2d2SMike Kravetz 	dst_addr = dst_start;
38760d4d2d2SMike Kravetz 	copied = 0;
3880169fd51SZhangPeng 	folio = NULL;
38960d4d2d2SMike Kravetz 	vma_hpagesize = vma_kernel_pagesize(dst_vma);
39060d4d2d2SMike Kravetz 
39160d4d2d2SMike Kravetz 	/*
39260d4d2d2SMike Kravetz 	 * Validate alignment based on huge page size
39360d4d2d2SMike Kravetz 	 */
39460d4d2d2SMike Kravetz 	err = -EINVAL;
39560d4d2d2SMike Kravetz 	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
39660d4d2d2SMike Kravetz 		goto out_unlock;
39760d4d2d2SMike Kravetz 
39860d4d2d2SMike Kravetz retry:
39960d4d2d2SMike Kravetz 	/*
400c1e8d7c6SMichel Lespinasse 	 * On routine entry dst_vma is set.  If we had to drop the mmap_lock and
40160d4d2d2SMike Kravetz 	 * retry, dst_vma will be set to NULL and we must look it up again.
40260d4d2d2SMike Kravetz 	 */
40360d4d2d2SMike Kravetz 	if (!dst_vma) {
40427d02568SMike Rapoport 		err = -ENOENT;
405643aa36eSWei Yang 		dst_vma = find_dst_vma(dst_mm, dst_start, len);
40660d4d2d2SMike Kravetz 		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
40760d4d2d2SMike Kravetz 			goto out_unlock;
4081c9e8defSMike Kravetz 
40927d02568SMike Rapoport 		err = -EINVAL;
41027d02568SMike Rapoport 		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
41127d02568SMike Rapoport 			goto out_unlock;
41227d02568SMike Rapoport 
4131c9e8defSMike Kravetz 		vm_shared = dst_vma->vm_flags & VM_SHARED;
41460d4d2d2SMike Kravetz 	}
41560d4d2d2SMike Kravetz 
41660d4d2d2SMike Kravetz 	/*
4171c9e8defSMike Kravetz 	 * If not shared, ensure the dst_vma has an anon_vma.
41860d4d2d2SMike Kravetz 	 */
41960d4d2d2SMike Kravetz 	err = -ENOMEM;
4201c9e8defSMike Kravetz 	if (!vm_shared) {
42160d4d2d2SMike Kravetz 		if (unlikely(anon_vma_prepare(dst_vma)))
42260d4d2d2SMike Kravetz 			goto out_unlock;
4231c9e8defSMike Kravetz 	}
42460d4d2d2SMike Kravetz 
42560d4d2d2SMike Kravetz 	while (src_addr < src_start + len) {
42660d4d2d2SMike Kravetz 		BUG_ON(dst_addr >= dst_start + len);
42760d4d2d2SMike Kravetz 
42860d4d2d2SMike Kravetz 		/*
42940549ba8SMike Kravetz 		 * Serialize via vma_lock and hugetlb_fault_mutex.
43040549ba8SMike Kravetz 		 * vma_lock ensures the dst_pte remains valid even
43140549ba8SMike Kravetz 		 * in the case of shared pmds.  The fault mutex prevents
43240549ba8SMike Kravetz 		 * races with other faulting threads.
43360d4d2d2SMike Kravetz 		 */
434c0d0381aSMike Kravetz 		idx = linear_page_index(dst_vma, dst_addr);
4353a47c54fSMike Kravetz 		mapping = dst_vma->vm_file->f_mapping;
436188b04a7SWei Yang 		hash = hugetlb_fault_mutex_hash(mapping, idx);
43760d4d2d2SMike Kravetz 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
43840549ba8SMike Kravetz 		hugetlb_vma_lock_read(dst_vma);
43960d4d2d2SMike Kravetz 
44060d4d2d2SMike Kravetz 		err = -ENOMEM;
441aec44e0fSPeter Xu 		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
44260d4d2d2SMike Kravetz 		if (!dst_pte) {
44340549ba8SMike Kravetz 			hugetlb_vma_unlock_read(dst_vma);
44460d4d2d2SMike Kravetz 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
44560d4d2d2SMike Kravetz 			goto out_unlock;
44660d4d2d2SMike Kravetz 		}
44760d4d2d2SMike Kravetz 
448d9712937SAxel Rasmussen 		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
4496041c691SPeter Xu 		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
45060d4d2d2SMike Kravetz 			err = -EEXIST;
45140549ba8SMike Kravetz 			hugetlb_vma_unlock_read(dst_vma);
45260d4d2d2SMike Kravetz 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
45360d4d2d2SMike Kravetz 			goto out_unlock;
45460d4d2d2SMike Kravetz 		}
45560d4d2d2SMike Kravetz 
456d9712937SAxel Rasmussen 		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
4570169fd51SZhangPeng 					       src_addr, flags, &folio);
45860d4d2d2SMike Kravetz 
45940549ba8SMike Kravetz 		hugetlb_vma_unlock_read(dst_vma);
46060d4d2d2SMike Kravetz 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
46160d4d2d2SMike Kravetz 
46260d4d2d2SMike Kravetz 		cond_resched();
46360d4d2d2SMike Kravetz 
4649e368259SAndrea Arcangeli 		if (unlikely(err == -ENOENT)) {
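			/*
			 * -ENOENT means a folio was allocated but could not
			 * be filled with the mmap_lock held: drop the lock,
			 * copy from userspace without it, and retry.
			 */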
465d8ed45c5SMichel Lespinasse 			mmap_read_unlock(dst_mm);
4660169fd51SZhangPeng 			BUG_ON(!folio);
46760d4d2d2SMike Kravetz 
4680169fd51SZhangPeng 			err = copy_folio_from_user(folio,
469e87340caSZhangPeng 						   (const void __user *)src_addr, true);
47060d4d2d2SMike Kravetz 			if (unlikely(err)) {
47160d4d2d2SMike Kravetz 				err = -EFAULT;
47260d4d2d2SMike Kravetz 				goto out;
47360d4d2d2SMike Kravetz 			}
474d8ed45c5SMichel Lespinasse 			mmap_read_lock(dst_mm);
47560d4d2d2SMike Kravetz 
47660d4d2d2SMike Kravetz 			dst_vma = NULL;
47760d4d2d2SMike Kravetz 			goto retry;
47860d4d2d2SMike Kravetz 		} else
4790169fd51SZhangPeng 			BUG_ON(folio);
48060d4d2d2SMike Kravetz 
48160d4d2d2SMike Kravetz 		if (!err) {
48260d4d2d2SMike Kravetz 			dst_addr += vma_hpagesize;
48360d4d2d2SMike Kravetz 			src_addr += vma_hpagesize;
48460d4d2d2SMike Kravetz 			copied += vma_hpagesize;
48560d4d2d2SMike Kravetz 
48660d4d2d2SMike Kravetz 			if (fatal_signal_pending(current))
48760d4d2d2SMike Kravetz 				err = -EINTR;
48860d4d2d2SMike Kravetz 		}
48960d4d2d2SMike Kravetz 		if (err)
49060d4d2d2SMike Kravetz 			break;
49160d4d2d2SMike Kravetz 	}
49260d4d2d2SMike Kravetz 
49360d4d2d2SMike Kravetz out_unlock:
494d8ed45c5SMichel Lespinasse 	mmap_read_unlock(dst_mm);
49560d4d2d2SMike Kravetz out:
4960169fd51SZhangPeng 	if (folio)
4970169fd51SZhangPeng 		folio_put(folio);
49860d4d2d2SMike Kravetz 	BUG_ON(copied < 0);
49960d4d2d2SMike Kravetz 	BUG_ON(err > 0);
50060d4d2d2SMike Kravetz 	BUG_ON(!copied && !err);
50160d4d2d2SMike Kravetz 	return copied ? copied : err;
50260d4d2d2SMike Kravetz }
50360d4d2d2SMike Kravetz #else /* !CONFIG_HUGETLB_PAGE */
50460d4d2d2SMike Kravetz /* fail at build time if gcc attempts to use this */
50561c50040SAxel Rasmussen extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
50660d4d2d2SMike Kravetz 				    unsigned long dst_start,
50760d4d2d2SMike Kravetz 				    unsigned long src_start,
50860d4d2d2SMike Kravetz 				    unsigned long len,
509d9712937SAxel Rasmussen 				    uffd_flags_t flags);
51060d4d2d2SMike Kravetz #endif /* CONFIG_HUGETLB_PAGE */
51160d4d2d2SMike Kravetz 
51261c50040SAxel Rasmussen static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
5133217d3c7SMike Rapoport 						struct vm_area_struct *dst_vma,
5143217d3c7SMike Rapoport 						unsigned long dst_addr,
5153217d3c7SMike Rapoport 						unsigned long src_addr,
516d9712937SAxel Rasmussen 						uffd_flags_t flags,
517d7be6d7eSZhangPeng 						struct folio **foliop)
5183217d3c7SMike Rapoport {
5193217d3c7SMike Rapoport 	ssize_t err;
5203217d3c7SMike Rapoport 
521d9712937SAxel Rasmussen 	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
52261c50040SAxel Rasmussen 		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
523d9712937SAxel Rasmussen 						 dst_addr, flags);
524fc71884aSAxel Rasmussen 	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
525fc71884aSAxel Rasmussen 		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
526fc71884aSAxel Rasmussen 					       dst_addr, flags);
52715313257SAxel Rasmussen 	}
52815313257SAxel Rasmussen 
5295b51072eSAndrea Arcangeli 	/*
5305b51072eSAndrea Arcangeli 	 * The normal page fault path for a shmem will invoke the
5315b51072eSAndrea Arcangeli 	 * fault, fill the hole in the file and COW it right away. The
5325b51072eSAndrea Arcangeli 	 * result generates plain anonymous memory. So when we are
5335b51072eSAndrea Arcangeli 	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
5345b51072eSAndrea Arcangeli 	 * generate anonymous memory directly without actually filling
5355b51072eSAndrea Arcangeli 	 * the hole. For the MAP_PRIVATE case the robustness check
5365b51072eSAndrea Arcangeli 	 * only happens in the pagetable (to verify it's still none)
5375b51072eSAndrea Arcangeli 	 * and not in the radix tree.
5385b51072eSAndrea Arcangeli 	 */
5395b51072eSAndrea Arcangeli 	if (!(dst_vma->vm_flags & VM_SHARED)) {
540d9712937SAxel Rasmussen 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
54161c50040SAxel Rasmussen 			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
542d9712937SAxel Rasmussen 						    dst_addr, src_addr,
543d7be6d7eSZhangPeng 						    flags, foliop);
5443217d3c7SMike Rapoport 		else
54561c50040SAxel Rasmussen 			err = mfill_atomic_pte_zeropage(dst_pmd,
5463217d3c7SMike Rapoport 						 dst_vma, dst_addr);
5473217d3c7SMike Rapoport 	} else {
54861c50040SAxel Rasmussen 		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
54915313257SAxel Rasmussen 					     dst_addr, src_addr,
550d7be6d7eSZhangPeng 					     flags, foliop);
5513217d3c7SMike Rapoport 	}
5523217d3c7SMike Rapoport 
5533217d3c7SMike Rapoport 	return err;
5543217d3c7SMike Rapoport }
5553217d3c7SMike Rapoport 
556a734991cSAxel Rasmussen static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
557c1a4de99SAndrea Arcangeli 					    unsigned long dst_start,
558c1a4de99SAndrea Arcangeli 					    unsigned long src_start,
559c1a4de99SAndrea Arcangeli 					    unsigned long len,
560a759a909SNadav Amit 					    atomic_t *mmap_changing,
561d9712937SAxel Rasmussen 					    uffd_flags_t flags)
562c1a4de99SAndrea Arcangeli {
563c1a4de99SAndrea Arcangeli 	struct vm_area_struct *dst_vma;
564c1a4de99SAndrea Arcangeli 	ssize_t err;
565c1a4de99SAndrea Arcangeli 	pmd_t *dst_pmd;
566c1a4de99SAndrea Arcangeli 	unsigned long src_addr, dst_addr;
567b6ebaedbSAndrea Arcangeli 	long copied;
568d7be6d7eSZhangPeng 	struct folio *folio;
569c1a4de99SAndrea Arcangeli 
570c1a4de99SAndrea Arcangeli 	/*
571c1a4de99SAndrea Arcangeli 	 * Sanitize the command parameters:
572c1a4de99SAndrea Arcangeli 	 */
573c1a4de99SAndrea Arcangeli 	BUG_ON(dst_start & ~PAGE_MASK);
574c1a4de99SAndrea Arcangeli 	BUG_ON(len & ~PAGE_MASK);
575c1a4de99SAndrea Arcangeli 
576c1a4de99SAndrea Arcangeli 	/* Does the address range wrap, or is the span zero-sized? */
577c1a4de99SAndrea Arcangeli 	BUG_ON(src_start + len <= src_start);
578c1a4de99SAndrea Arcangeli 	BUG_ON(dst_start + len <= dst_start);
579c1a4de99SAndrea Arcangeli 
580b6ebaedbSAndrea Arcangeli 	src_addr = src_start;
581b6ebaedbSAndrea Arcangeli 	dst_addr = dst_start;
582b6ebaedbSAndrea Arcangeli 	copied = 0;
583d7be6d7eSZhangPeng 	folio = NULL;
584b6ebaedbSAndrea Arcangeli retry:
585d8ed45c5SMichel Lespinasse 	mmap_read_lock(dst_mm);
586c1a4de99SAndrea Arcangeli 
587c1a4de99SAndrea Arcangeli 	/*
588df2cc96eSMike Rapoport 	 * If memory mappings are changing because of a non-cooperative
589df2cc96eSMike Rapoport 	 * operation (e.g. mremap) running in parallel, bail out and
590df2cc96eSMike Rapoport 	 * request the user to retry later.
591df2cc96eSMike Rapoport 	 */
592df2cc96eSMike Rapoport 	err = -EAGAIN;
593a759a909SNadav Amit 	if (mmap_changing && atomic_read(mmap_changing))
594df2cc96eSMike Rapoport 		goto out_unlock;
595df2cc96eSMike Rapoport 
596df2cc96eSMike Rapoport 	/*
597c1a4de99SAndrea Arcangeli 	 * Make sure the vma is not an anonymous shared mapping, and that
598c1a4de99SAndrea Arcangeli 	 * the dst range is both valid and fully within a single existing vma.
599c1a4de99SAndrea Arcangeli 	 */
60027d02568SMike Rapoport 	err = -ENOENT;
601643aa36eSWei Yang 	dst_vma = find_dst_vma(dst_mm, dst_start, len);
60226071cedSMike Rapoport 	if (!dst_vma)
60326071cedSMike Rapoport 		goto out_unlock;
60427d02568SMike Rapoport 
60527d02568SMike Rapoport 	err = -EINVAL;
60627d02568SMike Rapoport 	/*
60727d02568SMike Rapoport 	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
60827d02568SMike Rapoport 	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
60927d02568SMike Rapoport 	 */
61027d02568SMike Rapoport 	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
61127d02568SMike Rapoport 	    dst_vma->vm_flags & VM_SHARED))
61227d02568SMike Rapoport 		goto out_unlock;
61327d02568SMike Rapoport 
61427d02568SMike Rapoport 	/*
61572981e0eSAndrea Arcangeli 	 * validate 'mode' now that we know the dst_vma: don't allow
61672981e0eSAndrea Arcangeli 	 * a wrprotect copy if the userfaultfd didn't register as WP.
61772981e0eSAndrea Arcangeli 	 */
618d9712937SAxel Rasmussen 	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
61972981e0eSAndrea Arcangeli 		goto out_unlock;
62072981e0eSAndrea Arcangeli 
62172981e0eSAndrea Arcangeli 	/*
62227d02568SMike Rapoport 	 * If this is a HUGETLB vma, pass off to appropriate routine
62327d02568SMike Rapoport 	 */
62427d02568SMike Rapoport 	if (is_vm_hugetlb_page(dst_vma))
62561c50040SAxel Rasmussen 		return  mfill_atomic_hugetlb(dst_vma, dst_start,
626d9712937SAxel Rasmussen 					     src_start, len, flags);
62727d02568SMike Rapoport 
62826071cedSMike Rapoport 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
629b6ebaedbSAndrea Arcangeli 		goto out_unlock;
630d9712937SAxel Rasmussen 	if (!vma_is_shmem(dst_vma) &&
631d9712937SAxel Rasmussen 	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
632f6191471SAxel Rasmussen 		goto out_unlock;
633c1a4de99SAndrea Arcangeli 
634c1a4de99SAndrea Arcangeli 	/*
635c1a4de99SAndrea Arcangeli 	 * Ensure the dst_vma has an anon_vma or this page
636c1a4de99SAndrea Arcangeli 	 * would get a NULL anon_vma when moved into the
637c1a4de99SAndrea Arcangeli 	 * dst_vma.
638c1a4de99SAndrea Arcangeli 	 */
639c1a4de99SAndrea Arcangeli 	err = -ENOMEM;
6405b51072eSAndrea Arcangeli 	if (!(dst_vma->vm_flags & VM_SHARED) &&
6415b51072eSAndrea Arcangeli 	    unlikely(anon_vma_prepare(dst_vma)))
642b6ebaedbSAndrea Arcangeli 		goto out_unlock;
643c1a4de99SAndrea Arcangeli 
644b6ebaedbSAndrea Arcangeli 	while (src_addr < src_start + len) {
645c1a4de99SAndrea Arcangeli 		pmd_t dst_pmdval;
646b6ebaedbSAndrea Arcangeli 
647c1a4de99SAndrea Arcangeli 		BUG_ON(dst_addr >= dst_start + len);
648b6ebaedbSAndrea Arcangeli 
649c1a4de99SAndrea Arcangeli 		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
650c1a4de99SAndrea Arcangeli 		if (unlikely(!dst_pmd)) {
651c1a4de99SAndrea Arcangeli 			err = -ENOMEM;
652c1a4de99SAndrea Arcangeli 			break;
653c1a4de99SAndrea Arcangeli 		}
654c1a4de99SAndrea Arcangeli 
655dab6e717SPeter Zijlstra 		dst_pmdval = pmdp_get_lockless(dst_pmd);
656c1a4de99SAndrea Arcangeli 		/*
657c1a4de99SAndrea Arcangeli 		 * If the dst_pmd is mapped as THP don't
658c1a4de99SAndrea Arcangeli 		 * override it and just be strict.
659c1a4de99SAndrea Arcangeli 		 */
660c1a4de99SAndrea Arcangeli 		if (unlikely(pmd_trans_huge(dst_pmdval))) {
661c1a4de99SAndrea Arcangeli 			err = -EEXIST;
662c1a4de99SAndrea Arcangeli 			break;
663c1a4de99SAndrea Arcangeli 		}
664c1a4de99SAndrea Arcangeli 		if (unlikely(pmd_none(dst_pmdval)) &&
6654cf58924SJoel Fernandes (Google) 		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
666c1a4de99SAndrea Arcangeli 			err = -ENOMEM;
667c1a4de99SAndrea Arcangeli 			break;
668c1a4de99SAndrea Arcangeli 		}
669c1a4de99SAndrea Arcangeli 		/* If a huge pmd materialized from under us, fail */
670c1a4de99SAndrea Arcangeli 		if (unlikely(pmd_trans_huge(*dst_pmd))) {
671c1a4de99SAndrea Arcangeli 			err = -EFAULT;
672c1a4de99SAndrea Arcangeli 			break;
673c1a4de99SAndrea Arcangeli 		}
674c1a4de99SAndrea Arcangeli 
675c1a4de99SAndrea Arcangeli 		BUG_ON(pmd_none(*dst_pmd));
676c1a4de99SAndrea Arcangeli 		BUG_ON(pmd_trans_huge(*dst_pmd));
677c1a4de99SAndrea Arcangeli 
67861c50040SAxel Rasmussen 		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
679d7be6d7eSZhangPeng 				       src_addr, flags, &folio);
680c1a4de99SAndrea Arcangeli 		cond_resched();
681c1a4de99SAndrea Arcangeli 
6829e368259SAndrea Arcangeli 		if (unlikely(err == -ENOENT)) {
683d7be6d7eSZhangPeng 			void *kaddr;
684b6ebaedbSAndrea Arcangeli 
685d8ed45c5SMichel Lespinasse 			mmap_read_unlock(dst_mm);
686d7be6d7eSZhangPeng 			BUG_ON(!folio);
687b6ebaedbSAndrea Arcangeli 
688d7be6d7eSZhangPeng 			kaddr = kmap_local_folio(folio, 0);
689d7be6d7eSZhangPeng 			err = copy_from_user(kaddr,
690b6ebaedbSAndrea Arcangeli 					     (const void __user *) src_addr,
691b6ebaedbSAndrea Arcangeli 					     PAGE_SIZE);
692d7be6d7eSZhangPeng 			kunmap_local(kaddr);
693b6ebaedbSAndrea Arcangeli 			if (unlikely(err)) {
694b6ebaedbSAndrea Arcangeli 				err = -EFAULT;
695b6ebaedbSAndrea Arcangeli 				goto out;
696b6ebaedbSAndrea Arcangeli 			}
697d7be6d7eSZhangPeng 			flush_dcache_folio(folio);
698b6ebaedbSAndrea Arcangeli 			goto retry;
699b6ebaedbSAndrea Arcangeli 		} else
700d7be6d7eSZhangPeng 			BUG_ON(folio);
701b6ebaedbSAndrea Arcangeli 
702c1a4de99SAndrea Arcangeli 		if (!err) {
703c1a4de99SAndrea Arcangeli 			dst_addr += PAGE_SIZE;
704c1a4de99SAndrea Arcangeli 			src_addr += PAGE_SIZE;
705c1a4de99SAndrea Arcangeli 			copied += PAGE_SIZE;
706c1a4de99SAndrea Arcangeli 
707c1a4de99SAndrea Arcangeli 			if (fatal_signal_pending(current))
708c1a4de99SAndrea Arcangeli 				err = -EINTR;
709c1a4de99SAndrea Arcangeli 		}
710c1a4de99SAndrea Arcangeli 		if (err)
711c1a4de99SAndrea Arcangeli 			break;
712c1a4de99SAndrea Arcangeli 	}
713c1a4de99SAndrea Arcangeli 
714b6ebaedbSAndrea Arcangeli out_unlock:
715d8ed45c5SMichel Lespinasse 	mmap_read_unlock(dst_mm);
716b6ebaedbSAndrea Arcangeli out:
717d7be6d7eSZhangPeng 	if (folio)
718d7be6d7eSZhangPeng 		folio_put(folio);
719c1a4de99SAndrea Arcangeli 	BUG_ON(copied < 0);
720c1a4de99SAndrea Arcangeli 	BUG_ON(err > 0);
721c1a4de99SAndrea Arcangeli 	BUG_ON(!copied && !err);
722c1a4de99SAndrea Arcangeli 	return copied ? copied : err;
723c1a4de99SAndrea Arcangeli }
724c1a4de99SAndrea Arcangeli 
725a734991cSAxel Rasmussen ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
726df2cc96eSMike Rapoport 			  unsigned long src_start, unsigned long len,
727d9712937SAxel Rasmussen 			  atomic_t *mmap_changing, uffd_flags_t flags)
728c1a4de99SAndrea Arcangeli {
729d9712937SAxel Rasmussen 	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
730d9712937SAxel Rasmussen 			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
731c1a4de99SAndrea Arcangeli }
732c1a4de99SAndrea Arcangeli 
733a734991cSAxel Rasmussen ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
734a759a909SNadav Amit 			      unsigned long len, atomic_t *mmap_changing)
735c1a4de99SAndrea Arcangeli {
736d9712937SAxel Rasmussen 	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
737d9712937SAxel Rasmussen 			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
738f6191471SAxel Rasmussen }
739f6191471SAxel Rasmussen 
740a734991cSAxel Rasmussen ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
74102891844SAxel Rasmussen 			      unsigned long len, atomic_t *mmap_changing,
74202891844SAxel Rasmussen 			      uffd_flags_t flags)
743f6191471SAxel Rasmussen {
744d9712937SAxel Rasmussen 	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
74502891844SAxel Rasmussen 			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
746c1a4de99SAndrea Arcangeli }
747ffd05793SShaohua Li 
748fc71884aSAxel Rasmussen ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
749fc71884aSAxel Rasmussen 			    unsigned long len, atomic_t *mmap_changing,
750fc71884aSAxel Rasmussen 			    uffd_flags_t flags)
751fc71884aSAxel Rasmussen {
752fc71884aSAxel Rasmussen 	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
753fc71884aSAxel Rasmussen 			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
754fc71884aSAxel Rasmussen }
755fc71884aSAxel Rasmussen 
75661c50040SAxel Rasmussen long uffd_wp_range(struct vm_area_struct *dst_vma,
757f369b07cSPeter Xu 		   unsigned long start, unsigned long len, bool enable_wp)
758f369b07cSPeter Xu {
759931298e1SDavid Hildenbrand 	unsigned int mm_cp_flags;
760f369b07cSPeter Xu 	struct mmu_gather tlb;
761d1751118SPeter Xu 	long ret;
762f369b07cSPeter Xu 
763a1b92a3fSMuhammad Usama Anjum 	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
764a1b92a3fSMuhammad Usama Anjum 			"The address range exceeds VMA boundary.\n");
765f369b07cSPeter Xu 	if (enable_wp)
766931298e1SDavid Hildenbrand 		mm_cp_flags = MM_CP_UFFD_WP;
767f369b07cSPeter Xu 	else
768931298e1SDavid Hildenbrand 		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;
769f369b07cSPeter Xu 
770931298e1SDavid Hildenbrand 	/*
771931298e1SDavid Hildenbrand 	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
772931298e1SDavid Hildenbrand 	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
773931298e1SDavid Hildenbrand 	 * to be write-protected by default whenever protection changes.
774931298e1SDavid Hildenbrand 	 * Try upgrading write permissions manually.
775931298e1SDavid Hildenbrand 	 */
776931298e1SDavid Hildenbrand 	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
777931298e1SDavid Hildenbrand 		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
77861c50040SAxel Rasmussen 	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
779d1751118SPeter Xu 	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
780f369b07cSPeter Xu 	tlb_finish_mmu(&tlb);
781d1751118SPeter Xu 
782d1751118SPeter Xu 	return ret;
783f369b07cSPeter Xu }
784f369b07cSPeter Xu 
785ffd05793SShaohua Li int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
786a759a909SNadav Amit 			unsigned long len, bool enable_wp,
787a759a909SNadav Amit 			atomic_t *mmap_changing)
788ffd05793SShaohua Li {
789a1b92a3fSMuhammad Usama Anjum 	unsigned long end = start + len;
790a1b92a3fSMuhammad Usama Anjum 	unsigned long _start, _end;
791ffd05793SShaohua Li 	struct vm_area_struct *dst_vma;
7925a90d5a1SPeter Xu 	unsigned long page_mask;
793d1751118SPeter Xu 	long err;
794a1b92a3fSMuhammad Usama Anjum 	VMA_ITERATOR(vmi, dst_mm, start);
795ffd05793SShaohua Li 
796ffd05793SShaohua Li 	/*
797ffd05793SShaohua Li 	 * Sanitize the command parameters:
798ffd05793SShaohua Li 	 */
799ffd05793SShaohua Li 	BUG_ON(start & ~PAGE_MASK);
800ffd05793SShaohua Li 	BUG_ON(len & ~PAGE_MASK);
801ffd05793SShaohua Li 
802ffd05793SShaohua Li 	/* Does the address range wrap, or is the span zero-sized? */
803ffd05793SShaohua Li 	BUG_ON(start + len <= start);
804ffd05793SShaohua Li 
805d8ed45c5SMichel Lespinasse 	mmap_read_lock(dst_mm);
806ffd05793SShaohua Li 
807ffd05793SShaohua Li 	/*
808ffd05793SShaohua Li 	 * If memory mappings are changing because of a non-cooperative
809ffd05793SShaohua Li 	 * operation (e.g. mremap) running in parallel, bail out and
810ffd05793SShaohua Li 	 * request the user to retry later.
811ffd05793SShaohua Li 	 */
812ffd05793SShaohua Li 	err = -EAGAIN;
813a759a909SNadav Amit 	if (mmap_changing && atomic_read(mmap_changing))
814ffd05793SShaohua Li 		goto out_unlock;
815ffd05793SShaohua Li 
816ffd05793SShaohua Li 	err = -ENOENT;
817a1b92a3fSMuhammad Usama Anjum 	for_each_vma_range(vmi, dst_vma, end) {
818b1f9e876SPeter Xu 
819a1b92a3fSMuhammad Usama Anjum 		if (!userfaultfd_wp(dst_vma)) {
820a1b92a3fSMuhammad Usama Anjum 			err = -ENOENT;
821a1b92a3fSMuhammad Usama Anjum 			break;
822a1b92a3fSMuhammad Usama Anjum 		}
823ffd05793SShaohua Li 
8245a90d5a1SPeter Xu 		if (is_vm_hugetlb_page(dst_vma)) {
8255a90d5a1SPeter Xu 			err = -EINVAL;
8265a90d5a1SPeter Xu 			page_mask = vma_kernel_pagesize(dst_vma) - 1;
8275a90d5a1SPeter Xu 			if ((start & page_mask) || (len & page_mask))
828a1b92a3fSMuhammad Usama Anjum 				break;
8295a90d5a1SPeter Xu 		}
8305a90d5a1SPeter Xu 
831a1b92a3fSMuhammad Usama Anjum 		_start = max(dst_vma->vm_start, start);
832a1b92a3fSMuhammad Usama Anjum 		_end = min(dst_vma->vm_end, end);
833a1b92a3fSMuhammad Usama Anjum 
83461c50040SAxel Rasmussen 		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
835ffd05793SShaohua Li 
836d1751118SPeter Xu 		/* Return 0 on success, <0 on failures */
837a1b92a3fSMuhammad Usama Anjum 		if (err < 0)
838a1b92a3fSMuhammad Usama Anjum 			break;
839ffd05793SShaohua Li 		err = 0;
840a1b92a3fSMuhammad Usama Anjum 	}
841ffd05793SShaohua Li out_unlock:
842d8ed45c5SMichel Lespinasse 	mmap_read_unlock(dst_mm);
843ffd05793SShaohua Li 	return err;
844ffd05793SShaohua Li }
845*adef4406SAndrea Arcangeli 
846*adef4406SAndrea Arcangeli 
847*adef4406SAndrea Arcangeli void double_pt_lock(spinlock_t *ptl1,
848*adef4406SAndrea Arcangeli 		    spinlock_t *ptl2)
849*adef4406SAndrea Arcangeli 	__acquires(ptl1)
850*adef4406SAndrea Arcangeli 	__acquires(ptl2)
851*adef4406SAndrea Arcangeli {
852*adef4406SAndrea Arcangeli 	spinlock_t *ptl_tmp;
853*adef4406SAndrea Arcangeli 
854*adef4406SAndrea Arcangeli 	if (ptl1 > ptl2) {
855*adef4406SAndrea Arcangeli 		/* exchange ptl1 and ptl2 */
856*adef4406SAndrea Arcangeli 		ptl_tmp = ptl1;
857*adef4406SAndrea Arcangeli 		ptl1 = ptl2;
858*adef4406SAndrea Arcangeli 		ptl2 = ptl_tmp;
859*adef4406SAndrea Arcangeli 	}
860*adef4406SAndrea Arcangeli 	/* lock in virtual address order to avoid lock inversion */
861*adef4406SAndrea Arcangeli 	spin_lock(ptl1);
862*adef4406SAndrea Arcangeli 	if (ptl1 != ptl2)
863*adef4406SAndrea Arcangeli 		spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
864*adef4406SAndrea Arcangeli 	else
865*adef4406SAndrea Arcangeli 		__acquire(ptl2);
866*adef4406SAndrea Arcangeli }
867*adef4406SAndrea Arcangeli 
868*adef4406SAndrea Arcangeli void double_pt_unlock(spinlock_t *ptl1,
869*adef4406SAndrea Arcangeli 		      spinlock_t *ptl2)
870*adef4406SAndrea Arcangeli 	__releases(ptl1)
871*adef4406SAndrea Arcangeli 	__releases(ptl2)
872*adef4406SAndrea Arcangeli {
873*adef4406SAndrea Arcangeli 	spin_unlock(ptl1);
874*adef4406SAndrea Arcangeli 	if (ptl1 != ptl2)
875*adef4406SAndrea Arcangeli 		spin_unlock(ptl2);
876*adef4406SAndrea Arcangeli 	else
877*adef4406SAndrea Arcangeli 		__release(ptl2);
878*adef4406SAndrea Arcangeli }
879*adef4406SAndrea Arcangeli 
880*adef4406SAndrea Arcangeli 
881*adef4406SAndrea Arcangeli static int move_present_pte(struct mm_struct *mm,
882*adef4406SAndrea Arcangeli 			    struct vm_area_struct *dst_vma,
883*adef4406SAndrea Arcangeli 			    struct vm_area_struct *src_vma,
884*adef4406SAndrea Arcangeli 			    unsigned long dst_addr, unsigned long src_addr,
885*adef4406SAndrea Arcangeli 			    pte_t *dst_pte, pte_t *src_pte,
886*adef4406SAndrea Arcangeli 			    pte_t orig_dst_pte, pte_t orig_src_pte,
887*adef4406SAndrea Arcangeli 			    spinlock_t *dst_ptl, spinlock_t *src_ptl,
888*adef4406SAndrea Arcangeli 			    struct folio *src_folio)
889*adef4406SAndrea Arcangeli {
890*adef4406SAndrea Arcangeli 	int err = 0;
891*adef4406SAndrea Arcangeli 
892*adef4406SAndrea Arcangeli 	double_pt_lock(dst_ptl, src_ptl);
893*adef4406SAndrea Arcangeli 
894*adef4406SAndrea Arcangeli 	if (!pte_same(*src_pte, orig_src_pte) ||
895*adef4406SAndrea Arcangeli 	    !pte_same(*dst_pte, orig_dst_pte)) {
896*adef4406SAndrea Arcangeli 		err = -EAGAIN;
897*adef4406SAndrea Arcangeli 		goto out;
898*adef4406SAndrea Arcangeli 	}
899*adef4406SAndrea Arcangeli 	if (folio_test_large(src_folio) ||
900*adef4406SAndrea Arcangeli 	    folio_maybe_dma_pinned(src_folio) ||
901*adef4406SAndrea Arcangeli 	    !PageAnonExclusive(&src_folio->page)) {
902*adef4406SAndrea Arcangeli 		err = -EBUSY;
903*adef4406SAndrea Arcangeli 		goto out;
904*adef4406SAndrea Arcangeli 	}
905*adef4406SAndrea Arcangeli 
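	/*
	 * Re-home the folio: point its anon_vma at the destination VMA and
	 * update its index so rmap walks find it at the new address.
	 */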
906*adef4406SAndrea Arcangeli 	folio_move_anon_rmap(src_folio, dst_vma);
907*adef4406SAndrea Arcangeli 	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
908*adef4406SAndrea Arcangeli 
909*adef4406SAndrea Arcangeli 	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
910*adef4406SAndrea Arcangeli 	/* Folio got pinned from under us. Put it back and fail the move. */
911*adef4406SAndrea Arcangeli 	if (folio_maybe_dma_pinned(src_folio)) {
912*adef4406SAndrea Arcangeli 		set_pte_at(mm, src_addr, src_pte, orig_src_pte);
913*adef4406SAndrea Arcangeli 		err = -EBUSY;
914*adef4406SAndrea Arcangeli 		goto out;
915*adef4406SAndrea Arcangeli 	}
916*adef4406SAndrea Arcangeli 
917*adef4406SAndrea Arcangeli 	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
918*adef4406SAndrea Arcangeli 	/* Follow mremap() behavior and treat the entry as dirty after the move */
919*adef4406SAndrea Arcangeli 	orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
920*adef4406SAndrea Arcangeli 
921*adef4406SAndrea Arcangeli 	set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
922*adef4406SAndrea Arcangeli out:
923*adef4406SAndrea Arcangeli 	double_pt_unlock(dst_ptl, src_ptl);
924*adef4406SAndrea Arcangeli 	return err;
925*adef4406SAndrea Arcangeli }
926*adef4406SAndrea Arcangeli 
927*adef4406SAndrea Arcangeli static int move_swap_pte(struct mm_struct *mm,
928*adef4406SAndrea Arcangeli 			 unsigned long dst_addr, unsigned long src_addr,
929*adef4406SAndrea Arcangeli 			 pte_t *dst_pte, pte_t *src_pte,
930*adef4406SAndrea Arcangeli 			 pte_t orig_dst_pte, pte_t orig_src_pte,
931*adef4406SAndrea Arcangeli 			 spinlock_t *dst_ptl, spinlock_t *src_ptl)
932*adef4406SAndrea Arcangeli {
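	/*
	 * Only a swap entry that is exclusively owned by this mapping can be
	 * moved; otherwise other references to the swapped-out page may
	 * still exist.
	 */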
933*adef4406SAndrea Arcangeli 	if (!pte_swp_exclusive(orig_src_pte))
934*adef4406SAndrea Arcangeli 		return -EBUSY;
935*adef4406SAndrea Arcangeli 
936*adef4406SAndrea Arcangeli 	double_pt_lock(dst_ptl, src_ptl);
937*adef4406SAndrea Arcangeli 
938*adef4406SAndrea Arcangeli 	if (!pte_same(*src_pte, orig_src_pte) ||
939*adef4406SAndrea Arcangeli 	    !pte_same(*dst_pte, orig_dst_pte)) {
940*adef4406SAndrea Arcangeli 		double_pt_unlock(dst_ptl, src_ptl);
941*adef4406SAndrea Arcangeli 		return -EAGAIN;
942*adef4406SAndrea Arcangeli 	}
943*adef4406SAndrea Arcangeli 
944*adef4406SAndrea Arcangeli 	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
945*adef4406SAndrea Arcangeli 	set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
946*adef4406SAndrea Arcangeli 	double_pt_unlock(dst_ptl, src_ptl);
947*adef4406SAndrea Arcangeli 
948*adef4406SAndrea Arcangeli 	return 0;
949*adef4406SAndrea Arcangeli }
950*adef4406SAndrea Arcangeli 
951*adef4406SAndrea Arcangeli /*
952*adef4406SAndrea Arcangeli  * The mmap_lock for reading is held by the caller. Just move the page
953*adef4406SAndrea Arcangeli  * from src_pmd to dst_pmd if possible, and return 0 if it succeeded
954*adef4406SAndrea Arcangeli  * in moving the page (a negative error code otherwise).
955*adef4406SAndrea Arcangeli  */
956*adef4406SAndrea Arcangeli static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
957*adef4406SAndrea Arcangeli 			  struct vm_area_struct *dst_vma,
958*adef4406SAndrea Arcangeli 			  struct vm_area_struct *src_vma,
959*adef4406SAndrea Arcangeli 			  unsigned long dst_addr, unsigned long src_addr,
960*adef4406SAndrea Arcangeli 			  __u64 mode)
961*adef4406SAndrea Arcangeli {
962*adef4406SAndrea Arcangeli 	swp_entry_t entry;
963*adef4406SAndrea Arcangeli 	pte_t orig_src_pte, orig_dst_pte;
964*adef4406SAndrea Arcangeli 	pte_t src_folio_pte;
965*adef4406SAndrea Arcangeli 	spinlock_t *src_ptl, *dst_ptl;
966*adef4406SAndrea Arcangeli 	pte_t *src_pte = NULL;
967*adef4406SAndrea Arcangeli 	pte_t *dst_pte = NULL;
968*adef4406SAndrea Arcangeli 
969*adef4406SAndrea Arcangeli 	struct folio *src_folio = NULL;
970*adef4406SAndrea Arcangeli 	struct anon_vma *src_anon_vma = NULL;
971*adef4406SAndrea Arcangeli 	struct mmu_notifier_range range;
972*adef4406SAndrea Arcangeli 	int err = 0;
973*adef4406SAndrea Arcangeli 
974*adef4406SAndrea Arcangeli 	flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
975*adef4406SAndrea Arcangeli 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
976*adef4406SAndrea Arcangeli 				src_addr, src_addr + PAGE_SIZE);
977*adef4406SAndrea Arcangeli 	mmu_notifier_invalidate_range_start(&range);
978*adef4406SAndrea Arcangeli retry:
979*adef4406SAndrea Arcangeli 	dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);
980*adef4406SAndrea Arcangeli 
981*adef4406SAndrea Arcangeli 	/* Retry if a huge pmd materialized from under us */
982*adef4406SAndrea Arcangeli 	if (unlikely(!dst_pte)) {
983*adef4406SAndrea Arcangeli 		err = -EAGAIN;
984*adef4406SAndrea Arcangeli 		goto out;
985*adef4406SAndrea Arcangeli 	}
986*adef4406SAndrea Arcangeli 
987*adef4406SAndrea Arcangeli 	src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);
988*adef4406SAndrea Arcangeli 
989*adef4406SAndrea Arcangeli 	/*
990*adef4406SAndrea Arcangeli 	 * We hold the mmap_lock only for reading, so MADV_DONTNEED
991*adef4406SAndrea Arcangeli 	 * can zap transparent huge pages under us, or a
992*adef4406SAndrea Arcangeli 	 * transparent huge page fault can establish new
993*adef4406SAndrea Arcangeli 	 * transparent huge pages under us.
994*adef4406SAndrea Arcangeli 	 */
995*adef4406SAndrea Arcangeli 	if (unlikely(!src_pte)) {
996*adef4406SAndrea Arcangeli 		err = -EAGAIN;
997*adef4406SAndrea Arcangeli 		goto out;
998*adef4406SAndrea Arcangeli 	}
999*adef4406SAndrea Arcangeli 
1000*adef4406SAndrea Arcangeli 	/* Sanity checks before the operation */
1001*adef4406SAndrea Arcangeli 	if (WARN_ON_ONCE(pmd_none(*dst_pmd)) ||	WARN_ON_ONCE(pmd_none(*src_pmd)) ||
1002*adef4406SAndrea Arcangeli 	    WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
1003*adef4406SAndrea Arcangeli 		err = -EINVAL;
1004*adef4406SAndrea Arcangeli 		goto out;
1005*adef4406SAndrea Arcangeli 	}
1006*adef4406SAndrea Arcangeli 
1007*adef4406SAndrea Arcangeli 	spin_lock(dst_ptl);
1008*adef4406SAndrea Arcangeli 	orig_dst_pte = *dst_pte;
1009*adef4406SAndrea Arcangeli 	spin_unlock(dst_ptl);
1010*adef4406SAndrea Arcangeli 	if (!pte_none(orig_dst_pte)) {
1011*adef4406SAndrea Arcangeli 		err = -EEXIST;
1012*adef4406SAndrea Arcangeli 		goto out;
1013*adef4406SAndrea Arcangeli 	}
1014*adef4406SAndrea Arcangeli 
1015*adef4406SAndrea Arcangeli 	spin_lock(src_ptl);
1016*adef4406SAndrea Arcangeli 	orig_src_pte = *src_pte;
1017*adef4406SAndrea Arcangeli 	spin_unlock(src_ptl);
1018*adef4406SAndrea Arcangeli 	if (pte_none(orig_src_pte)) {
1019*adef4406SAndrea Arcangeli 		if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
1020*adef4406SAndrea Arcangeli 			err = -ENOENT;
1021*adef4406SAndrea Arcangeli 		else /* nothing to do to move a hole */
1022*adef4406SAndrea Arcangeli 			err = 0;
1023*adef4406SAndrea Arcangeli 		goto out;
1024*adef4406SAndrea Arcangeli 	}
1025*adef4406SAndrea Arcangeli 
1026*adef4406SAndrea Arcangeli 	/* If the PTE changed after we locked the folio then start over */
1027*adef4406SAndrea Arcangeli 	if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
1028*adef4406SAndrea Arcangeli 		err = -EAGAIN;
1029*adef4406SAndrea Arcangeli 		goto out;
1030*adef4406SAndrea Arcangeli 	}
1031*adef4406SAndrea Arcangeli 
1032*adef4406SAndrea Arcangeli 	if (pte_present(orig_src_pte)) {
1033*adef4406SAndrea Arcangeli 		/*
1034*adef4406SAndrea Arcangeli 		 * Pin and lock both the source folio and anon_vma. Since we are
1035*adef4406SAndrea Arcangeli 		 * in an RCU read section, we can't block, so on contention we
1036*adef4406SAndrea Arcangeli 		 * have to unmap the ptes, obtain the lock and retry.
1037*adef4406SAndrea Arcangeli 		 */
1038*adef4406SAndrea Arcangeli 		if (!src_folio) {
1039*adef4406SAndrea Arcangeli 			struct folio *folio;
1040*adef4406SAndrea Arcangeli 
1041*adef4406SAndrea Arcangeli 			/*
1042*adef4406SAndrea Arcangeli 			 * Pin the page while holding the lock to be sure the
1043*adef4406SAndrea Arcangeli 			 * page isn't freed under us
1044*adef4406SAndrea Arcangeli 			 */
1045*adef4406SAndrea Arcangeli 			spin_lock(src_ptl);
1046*adef4406SAndrea Arcangeli 			if (!pte_same(orig_src_pte, *src_pte)) {
1047*adef4406SAndrea Arcangeli 				spin_unlock(src_ptl);
1048*adef4406SAndrea Arcangeli 				err = -EAGAIN;
1049*adef4406SAndrea Arcangeli 				goto out;
1050*adef4406SAndrea Arcangeli 			}
1051*adef4406SAndrea Arcangeli 
1052*adef4406SAndrea Arcangeli 			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
1053*adef4406SAndrea Arcangeli 			if (!folio || !PageAnonExclusive(&folio->page)) {
1054*adef4406SAndrea Arcangeli 				spin_unlock(src_ptl);
1055*adef4406SAndrea Arcangeli 				err = -EBUSY;
1056*adef4406SAndrea Arcangeli 				goto out;
1057*adef4406SAndrea Arcangeli 			}
1058*adef4406SAndrea Arcangeli 
1059*adef4406SAndrea Arcangeli 			folio_get(folio);
1060*adef4406SAndrea Arcangeli 			src_folio = folio;
1061*adef4406SAndrea Arcangeli 			src_folio_pte = orig_src_pte;
1062*adef4406SAndrea Arcangeli 			spin_unlock(src_ptl);
1063*adef4406SAndrea Arcangeli 
1064*adef4406SAndrea Arcangeli 			if (!folio_trylock(src_folio)) {
1065*adef4406SAndrea Arcangeli 				pte_unmap(&orig_src_pte);
1066*adef4406SAndrea Arcangeli 				pte_unmap(&orig_dst_pte);
1067*adef4406SAndrea Arcangeli 				src_pte = dst_pte = NULL;
1068*adef4406SAndrea Arcangeli 				/* now we can block and wait */
1069*adef4406SAndrea Arcangeli 				folio_lock(src_folio);
1070*adef4406SAndrea Arcangeli 				goto retry;
1071*adef4406SAndrea Arcangeli 			}
1072*adef4406SAndrea Arcangeli 
1073*adef4406SAndrea Arcangeli 			if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
1074*adef4406SAndrea Arcangeli 				err = -EBUSY;
1075*adef4406SAndrea Arcangeli 				goto out;
1076*adef4406SAndrea Arcangeli 			}
1077*adef4406SAndrea Arcangeli 		}
1078*adef4406SAndrea Arcangeli 
1079*adef4406SAndrea Arcangeli 		/* at this point we have src_folio locked */
1080*adef4406SAndrea Arcangeli 		if (folio_test_large(src_folio)) {
1081*adef4406SAndrea Arcangeli 			err = split_folio(src_folio);
1082*adef4406SAndrea Arcangeli 			if (err)
1083*adef4406SAndrea Arcangeli 				goto out;
1084*adef4406SAndrea Arcangeli 		}
1085*adef4406SAndrea Arcangeli 
1086*adef4406SAndrea Arcangeli 		if (!src_anon_vma) {
1087*adef4406SAndrea Arcangeli 			/*
1088*adef4406SAndrea Arcangeli 			 * folio_referenced walks the anon_vma chain
1089*adef4406SAndrea Arcangeli 			 * without the folio lock. Serialize against it with
1090*adef4406SAndrea Arcangeli 			 * the anon_vma lock, the folio lock is not enough.
1091*adef4406SAndrea Arcangeli 			 */
1092*adef4406SAndrea Arcangeli 			src_anon_vma = folio_get_anon_vma(src_folio);
1093*adef4406SAndrea Arcangeli 			if (!src_anon_vma) {
1094*adef4406SAndrea Arcangeli 				/* page was unmapped from under us */
1095*adef4406SAndrea Arcangeli 				err = -EAGAIN;
1096*adef4406SAndrea Arcangeli 				goto out;
1097*adef4406SAndrea Arcangeli 			}
1098*adef4406SAndrea Arcangeli 			if (!anon_vma_trylock_write(src_anon_vma)) {
1099*adef4406SAndrea Arcangeli 				pte_unmap(&orig_src_pte);
1100*adef4406SAndrea Arcangeli 				pte_unmap(&orig_dst_pte);
1101*adef4406SAndrea Arcangeli 				src_pte = dst_pte = NULL;
1102*adef4406SAndrea Arcangeli 				/* now we can block and wait */
1103*adef4406SAndrea Arcangeli 				anon_vma_lock_write(src_anon_vma);
1104*adef4406SAndrea Arcangeli 				goto retry;
1105*adef4406SAndrea Arcangeli 			}
1106*adef4406SAndrea Arcangeli 		}
1107*adef4406SAndrea Arcangeli 
1108*adef4406SAndrea Arcangeli 		err = move_present_pte(mm, dst_vma, src_vma,
1109*adef4406SAndrea Arcangeli 				       dst_addr, src_addr, dst_pte, src_pte,
1110*adef4406SAndrea Arcangeli 				       orig_dst_pte, orig_src_pte,
1111*adef4406SAndrea Arcangeli 				       dst_ptl, src_ptl, src_folio);
1112*adef4406SAndrea Arcangeli 	} else {
1113*adef4406SAndrea Arcangeli 		entry = pte_to_swp_entry(orig_src_pte);
1114*adef4406SAndrea Arcangeli 		if (non_swap_entry(entry)) {
1115*adef4406SAndrea Arcangeli 			if (is_migration_entry(entry)) {
1116*adef4406SAndrea Arcangeli 				pte_unmap(&orig_src_pte);
1117*adef4406SAndrea Arcangeli 				pte_unmap(&orig_dst_pte);
1118*adef4406SAndrea Arcangeli 				src_pte = dst_pte = NULL;
1119*adef4406SAndrea Arcangeli 				migration_entry_wait(mm, src_pmd, src_addr);
1120*adef4406SAndrea Arcangeli 				err = -EAGAIN;
1121*adef4406SAndrea Arcangeli 			} else
1122*adef4406SAndrea Arcangeli 				err = -EFAULT;
1123*adef4406SAndrea Arcangeli 			goto out;
1124*adef4406SAndrea Arcangeli 		}
1125*adef4406SAndrea Arcangeli 
1126*adef4406SAndrea Arcangeli 		err = move_swap_pte(mm, dst_addr, src_addr,
1127*adef4406SAndrea Arcangeli 				    dst_pte, src_pte,
1128*adef4406SAndrea Arcangeli 				    orig_dst_pte, orig_src_pte,
1129*adef4406SAndrea Arcangeli 				    dst_ptl, src_ptl);
1130*adef4406SAndrea Arcangeli 	}
1131*adef4406SAndrea Arcangeli 
1132*adef4406SAndrea Arcangeli out:
1133*adef4406SAndrea Arcangeli 	if (src_anon_vma) {
1134*adef4406SAndrea Arcangeli 		anon_vma_unlock_write(src_anon_vma);
1135*adef4406SAndrea Arcangeli 		put_anon_vma(src_anon_vma);
1136*adef4406SAndrea Arcangeli 	}
1137*adef4406SAndrea Arcangeli 	if (src_folio) {
1138*adef4406SAndrea Arcangeli 		folio_unlock(src_folio);
1139*adef4406SAndrea Arcangeli 		folio_put(src_folio);
1140*adef4406SAndrea Arcangeli 	}
1141*adef4406SAndrea Arcangeli 	if (dst_pte)
1142*adef4406SAndrea Arcangeli 		pte_unmap(dst_pte);
1143*adef4406SAndrea Arcangeli 	if (src_pte)
1144*adef4406SAndrea Arcangeli 		pte_unmap(src_pte);
1145*adef4406SAndrea Arcangeli 	mmu_notifier_invalidate_range_end(&range);
1146*adef4406SAndrea Arcangeli 
1147*adef4406SAndrea Arcangeli 	return err;
1148*adef4406SAndrea Arcangeli }
1149*adef4406SAndrea Arcangeli 
1150*adef4406SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1151*adef4406SAndrea Arcangeli static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1152*adef4406SAndrea Arcangeli 					unsigned long src_addr,
1153*adef4406SAndrea Arcangeli 					unsigned long src_end)
1154*adef4406SAndrea Arcangeli {
1155*adef4406SAndrea Arcangeli 	return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
1156*adef4406SAndrea Arcangeli 		src_end - src_addr < HPAGE_PMD_SIZE;
1157*adef4406SAndrea Arcangeli }
1158*adef4406SAndrea Arcangeli #else
1159*adef4406SAndrea Arcangeli static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1160*adef4406SAndrea Arcangeli 					unsigned long src_addr,
1161*adef4406SAndrea Arcangeli 					unsigned long src_end)
1162*adef4406SAndrea Arcangeli {
1163*adef4406SAndrea Arcangeli 	/* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
1164*adef4406SAndrea Arcangeli 	return false;
1165*adef4406SAndrea Arcangeli }
1166*adef4406SAndrea Arcangeli #endif
1167*adef4406SAndrea Arcangeli 
1168*adef4406SAndrea Arcangeli static inline bool vma_move_compatible(struct vm_area_struct *vma)
1169*adef4406SAndrea Arcangeli {
1170*adef4406SAndrea Arcangeli 	return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
1171*adef4406SAndrea Arcangeli 				  VM_MIXEDMAP | VM_SHADOW_STACK));
1172*adef4406SAndrea Arcangeli }
1173*adef4406SAndrea Arcangeli 
1174*adef4406SAndrea Arcangeli static int validate_move_areas(struct userfaultfd_ctx *ctx,
1175*adef4406SAndrea Arcangeli 			       struct vm_area_struct *src_vma,
1176*adef4406SAndrea Arcangeli 			       struct vm_area_struct *dst_vma)
1177*adef4406SAndrea Arcangeli {
1178*adef4406SAndrea Arcangeli 	/* Only allow moving if both have the same access and protection */
1179*adef4406SAndrea Arcangeli 	if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
1180*adef4406SAndrea Arcangeli 	    pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
1181*adef4406SAndrea Arcangeli 		return -EINVAL;
1182*adef4406SAndrea Arcangeli 
1183*adef4406SAndrea Arcangeli 	/* Only allow moving if both are mlocked or both aren't */
1184*adef4406SAndrea Arcangeli 	if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
1185*adef4406SAndrea Arcangeli 		return -EINVAL;
1186*adef4406SAndrea Arcangeli 
1187*adef4406SAndrea Arcangeli 	/*
1188*adef4406SAndrea Arcangeli 	 * For now, we keep it simple and only move between writable VMAs.
1189*adef4406SAndrea Arcangeli 	 * Access flags are equal, therefore checking only the source is enough.
1190*adef4406SAndrea Arcangeli 	 */
1191*adef4406SAndrea Arcangeli 	if (!(src_vma->vm_flags & VM_WRITE))
1192*adef4406SAndrea Arcangeli 		return -EINVAL;
1193*adef4406SAndrea Arcangeli 
1194*adef4406SAndrea Arcangeli 	/* Check if vma flags indicate content which can be moved */
1195*adef4406SAndrea Arcangeli 	if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
1196*adef4406SAndrea Arcangeli 		return -EINVAL;
1197*adef4406SAndrea Arcangeli 
1198*adef4406SAndrea Arcangeli 	/* Ensure dst_vma is registered in uffd we are operating on */
1199*adef4406SAndrea Arcangeli 	if (!dst_vma->vm_userfaultfd_ctx.ctx ||
1200*adef4406SAndrea Arcangeli 	    dst_vma->vm_userfaultfd_ctx.ctx != ctx)
1201*adef4406SAndrea Arcangeli 		return -EINVAL;
1202*adef4406SAndrea Arcangeli 
1203*adef4406SAndrea Arcangeli 	/* Only allow moving across anonymous vmas */
1204*adef4406SAndrea Arcangeli 	if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
1205*adef4406SAndrea Arcangeli 		return -EINVAL;
1206*adef4406SAndrea Arcangeli 
1207*adef4406SAndrea Arcangeli 	/*
1208*adef4406SAndrea Arcangeli 	 * Ensure the dst_vma has an anon_vma or this page
1209*adef4406SAndrea Arcangeli 	 * would get a NULL anon_vma when moved in the
1210*adef4406SAndrea Arcangeli 	 * dst_vma.
1211*adef4406SAndrea Arcangeli 	 */
1212*adef4406SAndrea Arcangeli 	if (unlikely(anon_vma_prepare(dst_vma)))
1213*adef4406SAndrea Arcangeli 		return -ENOMEM;
1214*adef4406SAndrea Arcangeli 
1215*adef4406SAndrea Arcangeli 	return 0;
1216*adef4406SAndrea Arcangeli }
1217*adef4406SAndrea Arcangeli 
1218*adef4406SAndrea Arcangeli /**
1219*adef4406SAndrea Arcangeli  * move_pages - move arbitrary anonymous pages of an existing vma
1220*adef4406SAndrea Arcangeli  * @ctx: pointer to the userfaultfd context
1221*adef4406SAndrea Arcangeli  * @mm: the address space to move pages
1222*adef4406SAndrea Arcangeli  * @dst_start: start of the destination virtual memory range
1223*adef4406SAndrea Arcangeli  * @src_start: start of the source virtual memory range
1224*adef4406SAndrea Arcangeli  * @len: length of the virtual memory range
1225*adef4406SAndrea Arcangeli  * @mode: flags from uffdio_move.mode
1226*adef4406SAndrea Arcangeli  *
1227*adef4406SAndrea Arcangeli  * Must be called with mmap_lock held for read.
1228*adef4406SAndrea Arcangeli  *
1229*adef4406SAndrea Arcangeli  * move_pages() remaps arbitrary anonymous pages atomically in zero
1230*adef4406SAndrea Arcangeli  * copy. It only works on non shared anonymous pages because those can
1231*adef4406SAndrea Arcangeli  * be relocated without generating non linear anon_vmas in the rmap
1232*adef4406SAndrea Arcangeli  * code.
1233*adef4406SAndrea Arcangeli  *
1234*adef4406SAndrea Arcangeli  * It provides a zero copy mechanism to handle userspace page faults.
1235*adef4406SAndrea Arcangeli  * The source vma pages should have mapcount == 1, which can be
1236*adef4406SAndrea Arcangeli  * enforced by using madvise(MADV_DONTFORK) on src vma.
1237*adef4406SAndrea Arcangeli  *
1238*adef4406SAndrea Arcangeli  * The thread receiving the page during the userland page fault
1239*adef4406SAndrea Arcangeli  * will receive the faulting page in the source vma through the network,
1240*adef4406SAndrea Arcangeli  * storage or any other I/O device (MADV_DONTFORK in the source vma
1241*adef4406SAndrea Arcangeli  * prevents move_pages() from failing with -EBUSY if the process forks before
1242*adef4406SAndrea Arcangeli  * move_pages() is called), then it will call move_pages() to map the
1243*adef4406SAndrea Arcangeli  * page at the faulting address in the destination vma.
1244*adef4406SAndrea Arcangeli  *
1245*adef4406SAndrea Arcangeli  * This userfaultfd command works purely via pagetables, so it's the
1246*adef4406SAndrea Arcangeli  * most efficient way to move physical non shared anonymous pages
1247*adef4406SAndrea Arcangeli  * across different virtual addresses. Unlike mremap()/mmap()/munmap()
1248*adef4406SAndrea Arcangeli  * it does not create any new vmas. The mapping in the destination
1249*adef4406SAndrea Arcangeli  * address is atomic.
1250*adef4406SAndrea Arcangeli  *
1251*adef4406SAndrea Arcangeli  * It only works if the vma protection bits are identical between the
1252*adef4406SAndrea Arcangeli  * source and destination vma.
1253*adef4406SAndrea Arcangeli  *
1254*adef4406SAndrea Arcangeli  * It can remap non shared anonymous pages within the same vma too.
1255*adef4406SAndrea Arcangeli  *
1256*adef4406SAndrea Arcangeli  * If the source virtual memory range has any unmapped holes, or if
1257*adef4406SAndrea Arcangeli  * the destination virtual memory range is not a whole unmapped hole,
1258*adef4406SAndrea Arcangeli  * move_pages() will fail respectively with -ENOENT or -EEXIST. This
1259*adef4406SAndrea Arcangeli  * provides a very strict behavior to avoid any chance of memory
1260*adef4406SAndrea Arcangeli  * corruption going unnoticed if there are userland race conditions.
1261*adef4406SAndrea Arcangeli  * Only one thread should resolve the userland page fault at any given
1262*adef4406SAndrea Arcangeli  * time for any given faulting address. This means that if two threads
1263*adef4406SAndrea Arcangeli  * try to both call move_pages() on the same destination address at the
1264*adef4406SAndrea Arcangeli  * same time, the second thread will get an explicit error from this
1265*adef4406SAndrea Arcangeli  * command.
1266*adef4406SAndrea Arcangeli  *
1267*adef4406SAndrea Arcangeli  * The command retval will return "len" if successful. The command
1268*adef4406SAndrea Arcangeli  * however can be interrupted by fatal signals or errors. If
1269*adef4406SAndrea Arcangeli  * interrupted it will return the number of bytes successfully
1270*adef4406SAndrea Arcangeli  * remapped before the interruption if any, or the negative error if
1271*adef4406SAndrea Arcangeli  * none. It will never return zero. Either it will return an error or
1272*adef4406SAndrea Arcangeli  * an amount of bytes successfully moved. If the retval reports a
1273*adef4406SAndrea Arcangeli  * "short" remap, the move_pages() command should be repeated by
1274*adef4406SAndrea Arcangeli  * userland with src+retval, dst+retval, len-retval if it wants to know
1275*adef4406SAndrea Arcangeli  * about the error that interrupted it (see the sketch after this function).
1276*adef4406SAndrea Arcangeli  *
1277*adef4406SAndrea Arcangeli  * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
1278*adef4406SAndrea Arcangeli  * prevent -ENOENT errors from materializing if there are holes in the
1279*adef4406SAndrea Arcangeli  * source virtual range that is being remapped. The holes will be
1280*adef4406SAndrea Arcangeli  * accounted as successfully remapped in the retval of the
1281*adef4406SAndrea Arcangeli  * command. This is mostly useful to remap naturally hugepage-aligned
1282*adef4406SAndrea Arcangeli  * virtual regions without knowing whether there are transparent
1283*adef4406SAndrea Arcangeli  * hugepages in the regions or not, while avoiding the risk of having
1284*adef4406SAndrea Arcangeli  * to split the hugepmd during the remap.
1285*adef4406SAndrea Arcangeli  *
1286*adef4406SAndrea Arcangeli  * If there's any rmap walk that is taking the anon_vma locks without
1287*adef4406SAndrea Arcangeli  * first obtaining the folio lock (the only current instance is
1288*adef4406SAndrea Arcangeli  * folio_referenced), they will have to verify if the folio->mapping
1289*adef4406SAndrea Arcangeli  * has changed after taking the anon_vma lock. If it changed they
1290*adef4406SAndrea Arcangeli  * should release the lock and retry obtaining a new anon_vma, because
1291*adef4406SAndrea Arcangeli  * it means the anon_vma was changed by move_pages() before the lock
1292*adef4406SAndrea Arcangeli  * could be obtained. This is the only additional complexity added to
1293*adef4406SAndrea Arcangeli  * the rmap code to provide this anonymous page remapping functionality.
1294*adef4406SAndrea Arcangeli  */
1295*adef4406SAndrea Arcangeli ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
1296*adef4406SAndrea Arcangeli 		   unsigned long dst_start, unsigned long src_start,
1297*adef4406SAndrea Arcangeli 		   unsigned long len, __u64 mode)
1298*adef4406SAndrea Arcangeli {
1299*adef4406SAndrea Arcangeli 	struct vm_area_struct *src_vma, *dst_vma;
1300*adef4406SAndrea Arcangeli 	unsigned long src_addr, dst_addr;
1301*adef4406SAndrea Arcangeli 	pmd_t *src_pmd, *dst_pmd;
1302*adef4406SAndrea Arcangeli 	long err = -EINVAL;
1303*adef4406SAndrea Arcangeli 	ssize_t moved = 0;
1304*adef4406SAndrea Arcangeli 
1305*adef4406SAndrea Arcangeli 	/* Sanitize the command parameters. */
1306*adef4406SAndrea Arcangeli 	if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
1307*adef4406SAndrea Arcangeli 	    WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
1308*adef4406SAndrea Arcangeli 	    WARN_ON_ONCE(len & ~PAGE_MASK))
1309*adef4406SAndrea Arcangeli 		goto out;
1310*adef4406SAndrea Arcangeli 
1311*adef4406SAndrea Arcangeli 	/* Does the address range wrap, or is the span zero-sized? */
1312*adef4406SAndrea Arcangeli 	if (WARN_ON_ONCE(src_start + len <= src_start) ||
1313*adef4406SAndrea Arcangeli 	    WARN_ON_ONCE(dst_start + len <= dst_start))
1314*adef4406SAndrea Arcangeli 		goto out;
1315*adef4406SAndrea Arcangeli 
1316*adef4406SAndrea Arcangeli 	/*
1317*adef4406SAndrea Arcangeli 	 * Make sure the vma is not shared, that the src and dst remap
1318*adef4406SAndrea Arcangeli 	 * ranges are both valid and fully within a single existing
1319*adef4406SAndrea Arcangeli 	 * vma.
1320*adef4406SAndrea Arcangeli 	 */
1321*adef4406SAndrea Arcangeli 	src_vma = find_vma(mm, src_start);
1322*adef4406SAndrea Arcangeli 	if (!src_vma || (src_vma->vm_flags & VM_SHARED))
1323*adef4406SAndrea Arcangeli 		goto out;
1324*adef4406SAndrea Arcangeli 	if (src_start < src_vma->vm_start ||
1325*adef4406SAndrea Arcangeli 	    src_start + len > src_vma->vm_end)
1326*adef4406SAndrea Arcangeli 		goto out;
1327*adef4406SAndrea Arcangeli 
1328*adef4406SAndrea Arcangeli 	dst_vma = find_vma(mm, dst_start);
1329*adef4406SAndrea Arcangeli 	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
1330*adef4406SAndrea Arcangeli 		goto out;
1331*adef4406SAndrea Arcangeli 	if (dst_start < dst_vma->vm_start ||
1332*adef4406SAndrea Arcangeli 	    dst_start + len > dst_vma->vm_end)
1333*adef4406SAndrea Arcangeli 		goto out;
1334*adef4406SAndrea Arcangeli 
1335*adef4406SAndrea Arcangeli 	err = validate_move_areas(ctx, src_vma, dst_vma);
1336*adef4406SAndrea Arcangeli 	if (err)
1337*adef4406SAndrea Arcangeli 		goto out;
1338*adef4406SAndrea Arcangeli 
1339*adef4406SAndrea Arcangeli 	for (src_addr = src_start, dst_addr = dst_start;
1340*adef4406SAndrea Arcangeli 	     src_addr < src_start + len;) {
1341*adef4406SAndrea Arcangeli 		spinlock_t *ptl;
1342*adef4406SAndrea Arcangeli 		pmd_t dst_pmdval;
1343*adef4406SAndrea Arcangeli 		unsigned long step_size;
1344*adef4406SAndrea Arcangeli 
1345*adef4406SAndrea Arcangeli 		/*
1346*adef4406SAndrea Arcangeli 		 * Below works because an anonymous area would not have a
1347*adef4406SAndrea Arcangeli 		 * transparent huge PUD. If file-backed support is added,
1348*adef4406SAndrea Arcangeli 		 * that case would need to be handled here.
1349*adef4406SAndrea Arcangeli 		 */
1350*adef4406SAndrea Arcangeli 		src_pmd = mm_find_pmd(mm, src_addr);
1351*adef4406SAndrea Arcangeli 		if (unlikely(!src_pmd)) {
1352*adef4406SAndrea Arcangeli 			if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1353*adef4406SAndrea Arcangeli 				err = -ENOENT;
1354*adef4406SAndrea Arcangeli 				break;
1355*adef4406SAndrea Arcangeli 			}
1356*adef4406SAndrea Arcangeli 			src_pmd = mm_alloc_pmd(mm, src_addr);
1357*adef4406SAndrea Arcangeli 			if (unlikely(!src_pmd)) {
1358*adef4406SAndrea Arcangeli 				err = -ENOMEM;
1359*adef4406SAndrea Arcangeli 				break;
1360*adef4406SAndrea Arcangeli 			}
1361*adef4406SAndrea Arcangeli 		}
1362*adef4406SAndrea Arcangeli 		dst_pmd = mm_alloc_pmd(mm, dst_addr);
1363*adef4406SAndrea Arcangeli 		if (unlikely(!dst_pmd)) {
1364*adef4406SAndrea Arcangeli 			err = -ENOMEM;
1365*adef4406SAndrea Arcangeli 			break;
1366*adef4406SAndrea Arcangeli 		}
1367*adef4406SAndrea Arcangeli 
1368*adef4406SAndrea Arcangeli 		dst_pmdval = pmdp_get_lockless(dst_pmd);
1369*adef4406SAndrea Arcangeli 		/*
1370*adef4406SAndrea Arcangeli 		 * If the dst_pmd is mapped as THP don't override it and just
1371*adef4406SAndrea Arcangeli 		 * be strict. If dst_pmd changes into THP after this check, the
1372*adef4406SAndrea Arcangeli 		 * move_pages_huge_pmd() will detect the change and retry
1373*adef4406SAndrea Arcangeli 		 * while move_pages_pte() will detect the change and fail.
1374*adef4406SAndrea Arcangeli 		 */
1375*adef4406SAndrea Arcangeli 		if (unlikely(pmd_trans_huge(dst_pmdval))) {
1376*adef4406SAndrea Arcangeli 			err = -EEXIST;
1377*adef4406SAndrea Arcangeli 			break;
1378*adef4406SAndrea Arcangeli 		}
1379*adef4406SAndrea Arcangeli 
1380*adef4406SAndrea Arcangeli 		ptl = pmd_trans_huge_lock(src_pmd, src_vma);
1381*adef4406SAndrea Arcangeli 		if (ptl) {
1382*adef4406SAndrea Arcangeli 			if (pmd_devmap(*src_pmd)) {
1383*adef4406SAndrea Arcangeli 				spin_unlock(ptl);
1384*adef4406SAndrea Arcangeli 				err = -ENOENT;
1385*adef4406SAndrea Arcangeli 				break;
1386*adef4406SAndrea Arcangeli 			}
1387*adef4406SAndrea Arcangeli 
1388*adef4406SAndrea Arcangeli 			/* Check if we can move the pmd without splitting it. */
1389*adef4406SAndrea Arcangeli 			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
1390*adef4406SAndrea Arcangeli 			    !pmd_none(dst_pmdval)) {
1391*adef4406SAndrea Arcangeli 				struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
1392*adef4406SAndrea Arcangeli 
1393*adef4406SAndrea Arcangeli 				if (!folio || !PageAnonExclusive(&folio->page)) {
1394*adef4406SAndrea Arcangeli 					spin_unlock(ptl);
1395*adef4406SAndrea Arcangeli 					err = -EBUSY;
1396*adef4406SAndrea Arcangeli 					break;
1397*adef4406SAndrea Arcangeli 				}
1398*adef4406SAndrea Arcangeli 
1399*adef4406SAndrea Arcangeli 				spin_unlock(ptl);
1400*adef4406SAndrea Arcangeli 				split_huge_pmd(src_vma, src_pmd, src_addr);
1401*adef4406SAndrea Arcangeli 				/* The folio will be split by move_pages_pte() */
1402*adef4406SAndrea Arcangeli 				continue;
1403*adef4406SAndrea Arcangeli 			}
1404*adef4406SAndrea Arcangeli 
1405*adef4406SAndrea Arcangeli 			err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
1406*adef4406SAndrea Arcangeli 						  dst_pmdval, dst_vma, src_vma,
1407*adef4406SAndrea Arcangeli 						  dst_addr, src_addr);
1408*adef4406SAndrea Arcangeli 			step_size = HPAGE_PMD_SIZE;
1409*adef4406SAndrea Arcangeli 		} else {
1410*adef4406SAndrea Arcangeli 			if (pmd_none(*src_pmd)) {
1411*adef4406SAndrea Arcangeli 				if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1412*adef4406SAndrea Arcangeli 					err = -ENOENT;
1413*adef4406SAndrea Arcangeli 					break;
1414*adef4406SAndrea Arcangeli 				}
1415*adef4406SAndrea Arcangeli 				if (unlikely(__pte_alloc(mm, src_pmd))) {
1416*adef4406SAndrea Arcangeli 					err = -ENOMEM;
1417*adef4406SAndrea Arcangeli 					break;
1418*adef4406SAndrea Arcangeli 				}
1419*adef4406SAndrea Arcangeli 			}
1420*adef4406SAndrea Arcangeli 
1421*adef4406SAndrea Arcangeli 			if (unlikely(pte_alloc(mm, dst_pmd))) {
1422*adef4406SAndrea Arcangeli 				err = -ENOMEM;
1423*adef4406SAndrea Arcangeli 				break;
1424*adef4406SAndrea Arcangeli 			}
1425*adef4406SAndrea Arcangeli 
1426*adef4406SAndrea Arcangeli 			err = move_pages_pte(mm, dst_pmd, src_pmd,
1427*adef4406SAndrea Arcangeli 					     dst_vma, src_vma,
1428*adef4406SAndrea Arcangeli 					     dst_addr, src_addr, mode);
1429*adef4406SAndrea Arcangeli 			step_size = PAGE_SIZE;
1430*adef4406SAndrea Arcangeli 		}
1431*adef4406SAndrea Arcangeli 
1432*adef4406SAndrea Arcangeli 		cond_resched();
1433*adef4406SAndrea Arcangeli 
1434*adef4406SAndrea Arcangeli 		if (fatal_signal_pending(current)) {
1435*adef4406SAndrea Arcangeli 			/* Do not override an error */
1436*adef4406SAndrea Arcangeli 			if (!err || err == -EAGAIN)
1437*adef4406SAndrea Arcangeli 				err = -EINTR;
1438*adef4406SAndrea Arcangeli 			break;
1439*adef4406SAndrea Arcangeli 		}
1440*adef4406SAndrea Arcangeli 
1441*adef4406SAndrea Arcangeli 		if (err) {
1442*adef4406SAndrea Arcangeli 			if (err == -EAGAIN)
1443*adef4406SAndrea Arcangeli 				continue;
1444*adef4406SAndrea Arcangeli 			break;
1445*adef4406SAndrea Arcangeli 		}
1446*adef4406SAndrea Arcangeli 
1447*adef4406SAndrea Arcangeli 		/* Proceed to the next page */
1448*adef4406SAndrea Arcangeli 		dst_addr += step_size;
1449*adef4406SAndrea Arcangeli 		src_addr += step_size;
1450*adef4406SAndrea Arcangeli 		moved += step_size;
1451*adef4406SAndrea Arcangeli 	}
1452*adef4406SAndrea Arcangeli 
1453*adef4406SAndrea Arcangeli out:
1454*adef4406SAndrea Arcangeli 	VM_WARN_ON(moved < 0);
1455*adef4406SAndrea Arcangeli 	VM_WARN_ON(err > 0);
1456*adef4406SAndrea Arcangeli 	VM_WARN_ON(!moved && !err);
1457*adef4406SAndrea Arcangeli 	return moved ? moved : err;
1458*adef4406SAndrea Arcangeli }
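
/*
 * Illustrative userspace sketch (not part of this file): one way a
 * fault-handling thread might drive the UFFDIO_MOVE ioctl that ends up
 * in move_pages() above, retrying a "short" move as described in the
 * comment block before the function. It assumes a userfaultfd
 * descriptor "uffd" already created and registered for the destination
 * range, relies on struct uffdio_move and the UFFDIO_MOVE* definitions
 * from include/uapi/linux/userfaultfd.h, and trims error handling to a
 * minimum; the helper name move_range() is made up for the example.
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static long move_range(int uffd, unsigned long dst,
 *			       unsigned long src, unsigned long len)
 *	{
 *		struct uffdio_move move;
 *
 *		while (len) {
 *			move.dst  = dst;
 *			move.src  = src;
 *			move.len  = len;
 *			move.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES;
 *			move.move = 0;
 *
 *			if (!ioctl(uffd, UFFDIO_MOVE, &move))
 *				return 0;	// whole range was moved
 *			if (move.move <= 0)	// hard error, nothing was moved
 *				return move.move ? move.move : -errno;
 *
 *			// Short move: continue with src+retval, dst+retval
 *			// and len-retval, as documented above.
 *			dst += move.move;
 *			src += move.move;
 *			len -= move.move;
 *		}
 *		return 0;
 *	}
 *
 * A real caller would typically also retry when nothing was moved and
 * errno is EAGAIN, since the ioctl can fail transiently while the
 * address space is changing (e.g. around fork or mremap events).
 */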
1459