120c8ccb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c1a4de99SAndrea Arcangeli /*
3c1a4de99SAndrea Arcangeli * mm/userfaultfd.c
4c1a4de99SAndrea Arcangeli *
5c1a4de99SAndrea Arcangeli * Copyright (C) 2015 Red Hat, Inc.
6c1a4de99SAndrea Arcangeli */
7c1a4de99SAndrea Arcangeli
8c1a4de99SAndrea Arcangeli #include <linux/mm.h>
9174cd4b1SIngo Molnar #include <linux/sched/signal.h>
10c1a4de99SAndrea Arcangeli #include <linux/pagemap.h>
11c1a4de99SAndrea Arcangeli #include <linux/rmap.h>
12c1a4de99SAndrea Arcangeli #include <linux/swap.h>
13c1a4de99SAndrea Arcangeli #include <linux/swapops.h>
14c1a4de99SAndrea Arcangeli #include <linux/userfaultfd_k.h>
15c1a4de99SAndrea Arcangeli #include <linux/mmu_notifier.h>
1660d4d2d2SMike Kravetz #include <linux/hugetlb.h>
1726071cedSMike Rapoport #include <linux/shmem_fs.h>
18c1a4de99SAndrea Arcangeli #include <asm/tlbflush.h>
194a18419fSNadav Amit #include <asm/tlb.h>
20c1a4de99SAndrea Arcangeli #include "internal.h"
21c1a4de99SAndrea Arcangeli
22643aa36eSWei Yang static __always_inline
23867a43a3SLokesh Gidra bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
24643aa36eSWei Yang {
25867a43a3SLokesh Gidra /* Make sure that the dst range is fully within dst_vma. */
26867a43a3SLokesh Gidra if (dst_end > dst_vma->vm_end)
27867a43a3SLokesh Gidra return false;
28643aa36eSWei Yang
29643aa36eSWei Yang /*
30643aa36eSWei Yang * Check that the vma is registered in uffd; this is required to
31643aa36eSWei Yang * enforce the VM_MAYWRITE check done at uffd registration
32643aa36eSWei Yang * time.
33643aa36eSWei Yang */
34643aa36eSWei Yang if (!dst_vma->vm_userfaultfd_ctx.ctx)
35867a43a3SLokesh Gidra return false;
36643aa36eSWei Yang
37867a43a3SLokesh Gidra return true;
38867a43a3SLokesh Gidra }
39867a43a3SLokesh Gidra
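/*
 * Common VMA lookup used by both locking flavours below: find the VMA
 * containing @addr and, for private mappings, make sure its anon_vma is
 * allocated before any pte can be installed into it.
 */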
40867a43a3SLokesh Gidra static __always_inline
41867a43a3SLokesh Gidra struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
42867a43a3SLokesh Gidra unsigned long addr)
43867a43a3SLokesh Gidra {
44867a43a3SLokesh Gidra struct vm_area_struct *vma;
45867a43a3SLokesh Gidra
46867a43a3SLokesh Gidra mmap_assert_locked(mm);
47867a43a3SLokesh Gidra vma = vma_lookup(mm, addr);
48867a43a3SLokesh Gidra if (!vma)
49867a43a3SLokesh Gidra vma = ERR_PTR(-ENOENT);
50867a43a3SLokesh Gidra else if (!(vma->vm_flags & VM_SHARED) &&
51867a43a3SLokesh Gidra unlikely(anon_vma_prepare(vma)))
52867a43a3SLokesh Gidra vma = ERR_PTR(-ENOMEM);
53867a43a3SLokesh Gidra
54867a43a3SLokesh Gidra return vma;
55867a43a3SLokesh Gidra }
56867a43a3SLokesh Gidra
57867a43a3SLokesh Gidra #ifdef CONFIG_PER_VMA_LOCK
58867a43a3SLokesh Gidra /*
59867a43a3SLokesh Gidra * lock_vma() - Lookup and lock vma corresponding to @address.
60867a43a3SLokesh Gidra * @mm: mm to search vma in.
61867a43a3SLokesh Gidra * @address: address that the vma should contain.
62867a43a3SLokesh Gidra *
63867a43a3SLokesh Gidra * Should be called without holding mmap_lock. vma should be unlocked after use
64867a43a3SLokesh Gidra * with unlock_vma().
65867a43a3SLokesh Gidra *
66867a43a3SLokesh Gidra * Return: A locked vma containing @address, -ENOENT if no vma is found, or
67867a43a3SLokesh Gidra * -ENOMEM if anon_vma couldn't be allocated.
68867a43a3SLokesh Gidra */
69867a43a3SLokesh Gidra static struct vm_area_struct *lock_vma(struct mm_struct *mm,
70867a43a3SLokesh Gidra unsigned long address)
71867a43a3SLokesh Gidra {
72867a43a3SLokesh Gidra struct vm_area_struct *vma;
73867a43a3SLokesh Gidra
74867a43a3SLokesh Gidra vma = lock_vma_under_rcu(mm, address);
75867a43a3SLokesh Gidra if (vma) {
76867a43a3SLokesh Gidra /*
77867a43a3SLokesh Gidra * lock_vma_under_rcu() only checks anon_vma for private
78867a43a3SLokesh Gidra * anonymous mappings. But we need to ensure it is assigned in
79867a43a3SLokesh Gidra * private file-backed vmas as well.
80867a43a3SLokesh Gidra */
81867a43a3SLokesh Gidra if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
82867a43a3SLokesh Gidra vma_end_read(vma);
83867a43a3SLokesh Gidra else
84867a43a3SLokesh Gidra return vma;
85867a43a3SLokesh Gidra }
86867a43a3SLokesh Gidra
87867a43a3SLokesh Gidra mmap_read_lock(mm);
88867a43a3SLokesh Gidra vma = find_vma_and_prepare_anon(mm, address);
89867a43a3SLokesh Gidra if (!IS_ERR(vma)) {
90867a43a3SLokesh Gidra /*
91867a43a3SLokesh Gidra * We cannot use vma_start_read() as it may fail due to
92867a43a3SLokesh Gidra * a false-locked result (see comment in vma_start_read()). We
93867a43a3SLokesh Gidra * can avoid that by directly locking vm_lock under
94867a43a3SLokesh Gidra * mmap_lock, which guarantees that nobody can lock the
95867a43a3SLokesh Gidra * vma for write (vma_start_write()) under us.
96867a43a3SLokesh Gidra */
97867a43a3SLokesh Gidra down_read(&vma->vm_lock->lock);
98867a43a3SLokesh Gidra }
99867a43a3SLokesh Gidra
100867a43a3SLokesh Gidra mmap_read_unlock(mm);
101867a43a3SLokesh Gidra return vma;
102867a43a3SLokesh Gidra }
103867a43a3SLokesh Gidra
104867a43a3SLokesh Gidra static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
105867a43a3SLokesh Gidra unsigned long dst_start,
106867a43a3SLokesh Gidra unsigned long len)
107867a43a3SLokesh Gidra {
108867a43a3SLokesh Gidra struct vm_area_struct *dst_vma;
109867a43a3SLokesh Gidra
110867a43a3SLokesh Gidra dst_vma = lock_vma(dst_mm, dst_start);
111867a43a3SLokesh Gidra if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
112867a43a3SLokesh Gidra return dst_vma;
113867a43a3SLokesh Gidra
114867a43a3SLokesh Gidra vma_end_read(dst_vma);
115867a43a3SLokesh Gidra return ERR_PTR(-ENOENT);
116867a43a3SLokesh Gidra }
117867a43a3SLokesh Gidra
118867a43a3SLokesh Gidra static void uffd_mfill_unlock(struct vm_area_struct *vma)
119867a43a3SLokesh Gidra {
120867a43a3SLokesh Gidra vma_end_read(vma);
121867a43a3SLokesh Gidra }
122867a43a3SLokesh Gidra
123867a43a3SLokesh Gidra #else
124867a43a3SLokesh Gidra
125867a43a3SLokesh Gidra static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
126867a43a3SLokesh Gidra unsigned long dst_start,
127867a43a3SLokesh Gidra unsigned long len)
128867a43a3SLokesh Gidra {
129867a43a3SLokesh Gidra struct vm_area_struct *dst_vma;
130867a43a3SLokesh Gidra
131867a43a3SLokesh Gidra mmap_read_lock(dst_mm);
132867a43a3SLokesh Gidra dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
133867a43a3SLokesh Gidra if (IS_ERR(dst_vma))
134867a43a3SLokesh Gidra goto out_unlock;
135867a43a3SLokesh Gidra
136867a43a3SLokesh Gidra if (validate_dst_vma(dst_vma, dst_start + len))
137867a43a3SLokesh Gidra return dst_vma;
138867a43a3SLokesh Gidra
139867a43a3SLokesh Gidra dst_vma = ERR_PTR(-ENOENT);
140867a43a3SLokesh Gidra out_unlock:
141867a43a3SLokesh Gidra mmap_read_unlock(dst_mm);
142643aa36eSWei Yang return dst_vma;
143643aa36eSWei Yang }
144643aa36eSWei Yang
145867a43a3SLokesh Gidra static void uffd_mfill_unlock(struct vm_area_struct *vma)
146867a43a3SLokesh Gidra {
147867a43a3SLokesh Gidra mmap_read_unlock(vma->vm_mm);
148867a43a3SLokesh Gidra }
149867a43a3SLokesh Gidra #endif
150867a43a3SLokesh Gidra
151435cdb41SAxel Rasmussen /* Check if dst_addr is outside of the file's size. Must be called with the ptl held. */
152435cdb41SAxel Rasmussen static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
153435cdb41SAxel Rasmussen unsigned long dst_addr)
154435cdb41SAxel Rasmussen {
155435cdb41SAxel Rasmussen struct inode *inode;
156435cdb41SAxel Rasmussen pgoff_t offset, max_off;
157435cdb41SAxel Rasmussen
158435cdb41SAxel Rasmussen if (!dst_vma->vm_file)
159435cdb41SAxel Rasmussen return false;
160435cdb41SAxel Rasmussen
161435cdb41SAxel Rasmussen inode = dst_vma->vm_file->f_inode;
162435cdb41SAxel Rasmussen offset = linear_page_index(dst_vma, dst_addr);
163435cdb41SAxel Rasmussen max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
164435cdb41SAxel Rasmussen return offset >= max_off;
165435cdb41SAxel Rasmussen }
166435cdb41SAxel Rasmussen
16715313257SAxel Rasmussen /*
16815313257SAxel Rasmussen * Install PTEs, to map dst_addr (within dst_vma) to page.
16915313257SAxel Rasmussen *
1707d64ae3aSAxel Rasmussen * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
1717d64ae3aSAxel Rasmussen * and anon, and for both shared and private VMAs.
17215313257SAxel Rasmussen */
17361c50040SAxel Rasmussen int mfill_atomic_install_pte(pmd_t *dst_pmd,
17415313257SAxel Rasmussen struct vm_area_struct *dst_vma,
17515313257SAxel Rasmussen unsigned long dst_addr, struct page *page,
176d9712937SAxel Rasmussen bool newly_allocated, uffd_flags_t flags)
17715313257SAxel Rasmussen {
17815313257SAxel Rasmussen int ret;
17961c50040SAxel Rasmussen struct mm_struct *dst_mm = dst_vma->vm_mm;
18015313257SAxel Rasmussen pte_t _dst_pte, *dst_pte;
18115313257SAxel Rasmussen bool writable = dst_vma->vm_flags & VM_WRITE;
18215313257SAxel Rasmussen bool vm_shared = dst_vma->vm_flags & VM_SHARED;
18393b0d917SPeter Xu bool page_in_cache = page_mapping(page);
18415313257SAxel Rasmussen spinlock_t *ptl;
18528965f0fSVishal Moola (Oracle) struct folio *folio;
18615313257SAxel Rasmussen
18715313257SAxel Rasmussen _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
1889ae0f87dSPeter Xu _dst_pte = pte_mkdirty(_dst_pte);
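/*
 * A page-cache page mapped into a private VMA must not be mapped
 * writable here: a later write has to fault and COW the page instead
 * of dirtying the shared page cache.
 */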
18915313257SAxel Rasmussen if (page_in_cache && !vm_shared)
19015313257SAxel Rasmussen writable = false;
1918ee79edfSPeter Xu if (writable)
192161e393cSRick Edgecombe _dst_pte = pte_mkwrite(_dst_pte, dst_vma);
193d9712937SAxel Rasmussen if (flags & MFILL_ATOMIC_WP)
194f1eb1bacSPeter Xu _dst_pte = pte_mkuffd_wp(_dst_pte);
19515313257SAxel Rasmussen
1963622d3cdSHugh Dickins ret = -EAGAIN;
19715313257SAxel Rasmussen dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
1983622d3cdSHugh Dickins if (!dst_pte)
1993622d3cdSHugh Dickins goto out;
20015313257SAxel Rasmussen
201435cdb41SAxel Rasmussen if (mfill_file_over_size(dst_vma, dst_addr)) {
20215313257SAxel Rasmussen ret = -EFAULT;
20315313257SAxel Rasmussen goto out_unlock;
20415313257SAxel Rasmussen }
20515313257SAxel Rasmussen
20615313257SAxel Rasmussen ret = -EEXIST;
2078ee79edfSPeter Xu /*
2088ee79edfSPeter Xu * We allow overwriting a pte marker: consider the case where both
2098ee79edfSPeter Xu * MISSING|WP are registered, we first wr-protect a none pte which has
2108ee79edfSPeter Xu * no page cache page backing it, and then access the page.
2118ee79edfSPeter Xu */
212c33c7948SRyan Roberts if (!pte_none_mostly(ptep_get(dst_pte)))
21315313257SAxel Rasmussen goto out_unlock;
21415313257SAxel Rasmussen
21528965f0fSVishal Moola (Oracle) folio = page_folio(page);
216cea86fe2SHugh Dickins if (page_in_cache) {
217cea86fe2SHugh Dickins /* Usually, cache pages are already added to LRU */
218cea86fe2SHugh Dickins if (newly_allocated)
21928965f0fSVishal Moola (Oracle) folio_add_lru(folio);
2207123e19cSDavid Hildenbrand folio_add_file_rmap_pte(folio, page, dst_vma);
221cea86fe2SHugh Dickins } else {
2222853b66bSMatthew Wilcox (Oracle) folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
22328965f0fSVishal Moola (Oracle) folio_add_lru_vma(folio, dst_vma);
224cea86fe2SHugh Dickins }
22515313257SAxel Rasmussen
22615313257SAxel Rasmussen /*
22715313257SAxel Rasmussen * Must happen after rmap, as mm_counter() checks mapping (via
22815313257SAxel Rasmussen * PageAnon()), which is set by __page_set_anon_rmap().
22915313257SAxel Rasmussen */
230a23f517bSKefeng Wang inc_mm_counter(dst_mm, mm_counter(folio));
23115313257SAxel Rasmussen
23215313257SAxel Rasmussen set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
23315313257SAxel Rasmussen
23415313257SAxel Rasmussen /* No need to invalidate - it was non-present before */
23515313257SAxel Rasmussen update_mmu_cache(dst_vma, dst_addr, dst_pte);
23615313257SAxel Rasmussen ret = 0;
23715313257SAxel Rasmussen out_unlock:
23815313257SAxel Rasmussen pte_unmap_unlock(dst_pte, ptl);
2393622d3cdSHugh Dickins out:
24015313257SAxel Rasmussen return ret;
24115313257SAxel Rasmussen }
24215313257SAxel Rasmussen
24361c50040SAxel Rasmussen static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
244c1a4de99SAndrea Arcangeli struct vm_area_struct *dst_vma,
245c1a4de99SAndrea Arcangeli unsigned long dst_addr,
246b6ebaedbSAndrea Arcangeli unsigned long src_addr,
247d9712937SAxel Rasmussen uffd_flags_t flags,
248d7be6d7eSZhangPeng struct folio **foliop)
249c1a4de99SAndrea Arcangeli {
25007e6d409SZhangPeng void *kaddr;
251c1a4de99SAndrea Arcangeli int ret;
25207e6d409SZhangPeng struct folio *folio;
253c1a4de99SAndrea Arcangeli
254d7be6d7eSZhangPeng if (!*foliop) {
255c1a4de99SAndrea Arcangeli ret = -ENOMEM;
25607e6d409SZhangPeng folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
25707e6d409SZhangPeng dst_addr, false);
25807e6d409SZhangPeng if (!folio)
259c1a4de99SAndrea Arcangeli goto out;
260c1a4de99SAndrea Arcangeli
26107e6d409SZhangPeng kaddr = kmap_local_folio(folio, 0);
2625521de7dSIra Weiny /*
2635521de7dSIra Weiny * The read mmap_lock is held here. Despite the
2645521de7dSIra Weiny * mmap_lock being read-recursive, a deadlock is still
2655521de7dSIra Weiny * possible if a writer has taken a lock. For example:
2665521de7dSIra Weiny *
2675521de7dSIra Weiny * process A thread 1 takes read lock on own mmap_lock
2685521de7dSIra Weiny * process A thread 2 calls mmap, blocks taking write lock
2695521de7dSIra Weiny * process B thread 1 takes page fault, read lock on own mmap lock
2705521de7dSIra Weiny * process B thread 2 calls mmap, blocks taking write lock
2715521de7dSIra Weiny * process A thread 1 blocks taking read lock on process B
2725521de7dSIra Weiny * process B thread 1 blocks taking read lock on process A
2735521de7dSIra Weiny *
2745521de7dSIra Weiny * Disable page faults to prevent potential deadlock
2755521de7dSIra Weiny * and retry the copy outside the mmap_lock.
2765521de7dSIra Weiny */
2775521de7dSIra Weiny pagefault_disable();
27807e6d409SZhangPeng ret = copy_from_user(kaddr, (const void __user *) src_addr,
279b6ebaedbSAndrea Arcangeli PAGE_SIZE);
2805521de7dSIra Weiny pagefault_enable();
28107e6d409SZhangPeng kunmap_local(kaddr);
282b6ebaedbSAndrea Arcangeli
283c1e8d7c6SMichel Lespinasse /* fallback to copy_from_user outside mmap_lock */
284b6ebaedbSAndrea Arcangeli if (unlikely(ret)) {
2859e368259SAndrea Arcangeli ret = -ENOENT;
286d7be6d7eSZhangPeng *foliop = folio;
287b6ebaedbSAndrea Arcangeli /* don't free the page */
288b6ebaedbSAndrea Arcangeli goto out;
289b6ebaedbSAndrea Arcangeli }
2907c25a0b8SMuchun Song
29107e6d409SZhangPeng flush_dcache_folio(folio);
292b6ebaedbSAndrea Arcangeli } else {
293d7be6d7eSZhangPeng folio = *foliop;
294d7be6d7eSZhangPeng *foliop = NULL;
295b6ebaedbSAndrea Arcangeli }
296c1a4de99SAndrea Arcangeli
297c1a4de99SAndrea Arcangeli /*
29807e6d409SZhangPeng * The memory barrier inside __folio_mark_uptodate makes sure that
299f4f5329dSWei Yang * preceding stores to the page contents become visible before
300c1a4de99SAndrea Arcangeli * the set_pte_at() write.
301c1a4de99SAndrea Arcangeli */
30207e6d409SZhangPeng __folio_mark_uptodate(folio);
303c1a4de99SAndrea Arcangeli
304c1a4de99SAndrea Arcangeli ret = -ENOMEM;
30507e6d409SZhangPeng if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
306c1a4de99SAndrea Arcangeli goto out_release;
307c1a4de99SAndrea Arcangeli
30861c50040SAxel Rasmussen ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
30907e6d409SZhangPeng &folio->page, true, flags);
31015313257SAxel Rasmussen if (ret)
31115313257SAxel Rasmussen goto out_release;
312c1a4de99SAndrea Arcangeli out:
313c1a4de99SAndrea Arcangeli return ret;
314c1a4de99SAndrea Arcangeli out_release:
31507e6d409SZhangPeng folio_put(folio);
316c1a4de99SAndrea Arcangeli goto out;
317c1a4de99SAndrea Arcangeli }
318c1a4de99SAndrea Arcangeli
31961c50040SAxel Rasmussen static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
320c1a4de99SAndrea Arcangeli struct vm_area_struct *dst_vma,
321c1a4de99SAndrea Arcangeli unsigned long dst_addr)
322c1a4de99SAndrea Arcangeli {
323c1a4de99SAndrea Arcangeli pte_t _dst_pte, *dst_pte;
324c1a4de99SAndrea Arcangeli spinlock_t *ptl;
325c1a4de99SAndrea Arcangeli int ret;
326c1a4de99SAndrea Arcangeli
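/*
 * Install a special pte pointing at the shared zero page; no page is
 * allocated, so no rmap or memcg accounting is needed.
 */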
327c1a4de99SAndrea Arcangeli _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
328c1a4de99SAndrea Arcangeli dst_vma->vm_page_prot));
3293622d3cdSHugh Dickins ret = -EAGAIN;
33061c50040SAxel Rasmussen dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
3313622d3cdSHugh Dickins if (!dst_pte)
3323622d3cdSHugh Dickins goto out;
333435cdb41SAxel Rasmussen if (mfill_file_over_size(dst_vma, dst_addr)) {
334e2a50c1fSAndrea Arcangeli ret = -EFAULT;
335e2a50c1fSAndrea Arcangeli goto out_unlock;
336e2a50c1fSAndrea Arcangeli }
337e2a50c1fSAndrea Arcangeli ret = -EEXIST;
338c33c7948SRyan Roberts if (!pte_none(ptep_get(dst_pte)))
339c1a4de99SAndrea Arcangeli goto out_unlock;
34061c50040SAxel Rasmussen set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
341c1a4de99SAndrea Arcangeli /* No need to invalidate - it was non-present before */
342c1a4de99SAndrea Arcangeli update_mmu_cache(dst_vma, dst_addr, dst_pte);
343c1a4de99SAndrea Arcangeli ret = 0;
344c1a4de99SAndrea Arcangeli out_unlock:
345c1a4de99SAndrea Arcangeli pte_unmap_unlock(dst_pte, ptl);
3463622d3cdSHugh Dickins out:
347c1a4de99SAndrea Arcangeli return ret;
348c1a4de99SAndrea Arcangeli }
349c1a4de99SAndrea Arcangeli
35015313257SAxel Rasmussen /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
35161c50040SAxel Rasmussen static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
35215313257SAxel Rasmussen struct vm_area_struct *dst_vma,
35315313257SAxel Rasmussen unsigned long dst_addr,
354d9712937SAxel Rasmussen uffd_flags_t flags)
35515313257SAxel Rasmussen {
35615313257SAxel Rasmussen struct inode *inode = file_inode(dst_vma->vm_file);
35715313257SAxel Rasmussen pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
35812acf4fbSMatthew Wilcox (Oracle) struct folio *folio;
35915313257SAxel Rasmussen struct page *page;
36015313257SAxel Rasmussen int ret;
36115313257SAxel Rasmussen
36212acf4fbSMatthew Wilcox (Oracle) ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
36312acf4fbSMatthew Wilcox (Oracle) /* Our caller expects us to return -EFAULT if we failed to find the folio */
36473f37dbcSAxel Rasmussen if (ret == -ENOENT)
36573f37dbcSAxel Rasmussen ret = -EFAULT;
36615313257SAxel Rasmussen if (ret)
36715313257SAxel Rasmussen goto out;
36812acf4fbSMatthew Wilcox (Oracle) if (!folio) {
36915313257SAxel Rasmussen ret = -EFAULT;
37015313257SAxel Rasmussen goto out;
37115313257SAxel Rasmussen }
37215313257SAxel Rasmussen
37312acf4fbSMatthew Wilcox (Oracle) page = folio_file_page(folio, pgoff);
374a7605426SYang Shi if (PageHWPoison(page)) {
375a7605426SYang Shi ret = -EIO;
376a7605426SYang Shi goto out_release;
377a7605426SYang Shi }
378a7605426SYang Shi
37961c50040SAxel Rasmussen ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
380d9712937SAxel Rasmussen page, false, flags);
38115313257SAxel Rasmussen if (ret)
38215313257SAxel Rasmussen goto out_release;
38315313257SAxel Rasmussen
38412acf4fbSMatthew Wilcox (Oracle) folio_unlock(folio);
38515313257SAxel Rasmussen ret = 0;
38615313257SAxel Rasmussen out:
38715313257SAxel Rasmussen return ret;
38815313257SAxel Rasmussen out_release:
38912acf4fbSMatthew Wilcox (Oracle) folio_unlock(folio);
39012acf4fbSMatthew Wilcox (Oracle) folio_put(folio);
39115313257SAxel Rasmussen goto out;
39215313257SAxel Rasmussen }
39315313257SAxel Rasmussen
394fc71884aSAxel Rasmussen /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
395fc71884aSAxel Rasmussen static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
396fc71884aSAxel Rasmussen struct vm_area_struct *dst_vma,
397fc71884aSAxel Rasmussen unsigned long dst_addr,
398fc71884aSAxel Rasmussen uffd_flags_t flags)
399fc71884aSAxel Rasmussen {
400fc71884aSAxel Rasmussen int ret;
401fc71884aSAxel Rasmussen struct mm_struct *dst_mm = dst_vma->vm_mm;
402fc71884aSAxel Rasmussen pte_t _dst_pte, *dst_pte;
403fc71884aSAxel Rasmussen spinlock_t *ptl;
404fc71884aSAxel Rasmussen
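/*
 * Install a poison marker instead of a real page: later accesses to
 * this address fault and are treated like a hardware-poisoned page
 * (the faulting thread gets SIGBUS).
 */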
405fc71884aSAxel Rasmussen _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
406597425dfSHugh Dickins ret = -EAGAIN;
407fc71884aSAxel Rasmussen dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
408597425dfSHugh Dickins if (!dst_pte)
409597425dfSHugh Dickins goto out;
410fc71884aSAxel Rasmussen
411fc71884aSAxel Rasmussen if (mfill_file_over_size(dst_vma, dst_addr)) {
412fc71884aSAxel Rasmussen ret = -EFAULT;
413fc71884aSAxel Rasmussen goto out_unlock;
414fc71884aSAxel Rasmussen }
415fc71884aSAxel Rasmussen
416fc71884aSAxel Rasmussen ret = -EEXIST;
417fc71884aSAxel Rasmussen /* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
418afccb080SRyan Roberts if (!pte_none(ptep_get(dst_pte)))
419fc71884aSAxel Rasmussen goto out_unlock;
420fc71884aSAxel Rasmussen
421fc71884aSAxel Rasmussen set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
422fc71884aSAxel Rasmussen
423fc71884aSAxel Rasmussen /* No need to invalidate - it was non-present before */
424fc71884aSAxel Rasmussen update_mmu_cache(dst_vma, dst_addr, dst_pte);
425fc71884aSAxel Rasmussen ret = 0;
426fc71884aSAxel Rasmussen out_unlock:
427fc71884aSAxel Rasmussen pte_unmap_unlock(dst_pte, ptl);
428597425dfSHugh Dickins out:
429fc71884aSAxel Rasmussen return ret;
430fc71884aSAxel Rasmussen }
431fc71884aSAxel Rasmussen
432c1a4de99SAndrea Arcangeli static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
433c1a4de99SAndrea Arcangeli {
434c1a4de99SAndrea Arcangeli pgd_t *pgd;
435c2febafcSKirill A. Shutemov p4d_t *p4d;
436c1a4de99SAndrea Arcangeli pud_t *pud;
437c1a4de99SAndrea Arcangeli
438c1a4de99SAndrea Arcangeli pgd = pgd_offset(mm, address);
439c2febafcSKirill A. Shutemov p4d = p4d_alloc(mm, pgd, address);
440c2febafcSKirill A. Shutemov if (!p4d)
441c2febafcSKirill A. Shutemov return NULL;
442c2febafcSKirill A. Shutemov pud = pud_alloc(mm, p4d, address);
443c2febafcSKirill A. Shutemov if (!pud)
444c2febafcSKirill A. Shutemov return NULL;
445c1a4de99SAndrea Arcangeli /*
446c1a4de99SAndrea Arcangeli * Note that we didn't run this because the pmd was
447c1a4de99SAndrea Arcangeli * missing; the *pmd may already be established, and in
448c1a4de99SAndrea Arcangeli * turn it may also be a trans_huge_pmd.
449c1a4de99SAndrea Arcangeli */
450c2febafcSKirill A. Shutemov return pmd_alloc(mm, pud, address);
451c1a4de99SAndrea Arcangeli }
452c1a4de99SAndrea Arcangeli
45360d4d2d2SMike Kravetz #ifdef CONFIG_HUGETLB_PAGE
45460d4d2d2SMike Kravetz /*
455a734991cSAxel Rasmussen * mfill_atomic processing for HUGETLB vmas. Note that this routine is
456867a43a3SLokesh Gidra * called with either vma-lock or mmap_lock held, it will release the lock
457867a43a3SLokesh Gidra * before returning.
45860d4d2d2SMike Kravetz */
45961c50040SAxel Rasmussen static __always_inline ssize_t mfill_atomic_hugetlb(
4605e4c24a5SLokesh Gidra struct userfaultfd_ctx *ctx,
46160d4d2d2SMike Kravetz struct vm_area_struct *dst_vma,
46260d4d2d2SMike Kravetz unsigned long dst_start,
46360d4d2d2SMike Kravetz unsigned long src_start,
46460d4d2d2SMike Kravetz unsigned long len,
465d9712937SAxel Rasmussen uffd_flags_t flags)
46660d4d2d2SMike Kravetz {
46761c50040SAxel Rasmussen struct mm_struct *dst_mm = dst_vma->vm_mm;
46860d4d2d2SMike Kravetz ssize_t err;
46960d4d2d2SMike Kravetz pte_t *dst_pte;
47060d4d2d2SMike Kravetz unsigned long src_addr, dst_addr;
47160d4d2d2SMike Kravetz long copied;
4720169fd51SZhangPeng struct folio *folio;
47360d4d2d2SMike Kravetz unsigned long vma_hpagesize;
47460d4d2d2SMike Kravetz pgoff_t idx;
47560d4d2d2SMike Kravetz u32 hash;
47660d4d2d2SMike Kravetz struct address_space *mapping;
47760d4d2d2SMike Kravetz
47860d4d2d2SMike Kravetz /*
47960d4d2d2SMike Kravetz * There is no default zero huge page for all huge page sizes as
48060d4d2d2SMike Kravetz * supported by hugetlb. A PMD_SIZE huge page may exist as used
48160d4d2d2SMike Kravetz * by THP. Since we cannot reliably insert a zero page, this
48260d4d2d2SMike Kravetz * feature is not supported.
48360d4d2d2SMike Kravetz */
4848a13897fSAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
4855e4c24a5SLokesh Gidra up_read(&ctx->map_changing_lock);
486867a43a3SLokesh Gidra uffd_mfill_unlock(dst_vma);
48760d4d2d2SMike Kravetz return -EINVAL;
48860d4d2d2SMike Kravetz }
48960d4d2d2SMike Kravetz
49060d4d2d2SMike Kravetz src_addr = src_start;
49160d4d2d2SMike Kravetz dst_addr = dst_start;
49260d4d2d2SMike Kravetz copied = 0;
4930169fd51SZhangPeng folio = NULL;
49460d4d2d2SMike Kravetz vma_hpagesize = vma_kernel_pagesize(dst_vma);
49560d4d2d2SMike Kravetz
49660d4d2d2SMike Kravetz /*
49760d4d2d2SMike Kravetz * Validate alignment based on huge page size
49860d4d2d2SMike Kravetz */
49960d4d2d2SMike Kravetz err = -EINVAL;
50060d4d2d2SMike Kravetz if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
50160d4d2d2SMike Kravetz goto out_unlock;
50260d4d2d2SMike Kravetz
50360d4d2d2SMike Kravetz retry:
50460d4d2d2SMike Kravetz /*
505c1e8d7c6SMichel Lespinasse * On routine entry dst_vma is set. If we had to drop mmap_lock and
50660d4d2d2SMike Kravetz * retry, dst_vma will be set to NULL and we must look up again.
50760d4d2d2SMike Kravetz */
50860d4d2d2SMike Kravetz if (!dst_vma) {
509867a43a3SLokesh Gidra dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
510867a43a3SLokesh Gidra if (IS_ERR(dst_vma)) {
511867a43a3SLokesh Gidra err = PTR_ERR(dst_vma);
512867a43a3SLokesh Gidra goto out;
513867a43a3SLokesh Gidra }
514867a43a3SLokesh Gidra
51527d02568SMike Rapoport err = -ENOENT;
516867a43a3SLokesh Gidra if (!is_vm_hugetlb_page(dst_vma))
517867a43a3SLokesh Gidra goto out_unlock_vma;
5181c9e8defSMike Kravetz
51927d02568SMike Rapoport err = -EINVAL;
52027d02568SMike Rapoport if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
521867a43a3SLokesh Gidra goto out_unlock_vma;
52260d4d2d2SMike Kravetz
52360d4d2d2SMike Kravetz /*
524867a43a3SLokesh Gidra * If memory mappings are changing because of non-cooperative
525867a43a3SLokesh Gidra * operation (e.g. mremap) running in parallel, bail out and
526867a43a3SLokesh Gidra * request the user to retry later
52760d4d2d2SMike Kravetz */
528867a43a3SLokesh Gidra down_read(&ctx->map_changing_lock);
529867a43a3SLokesh Gidra err = -EAGAIN;
530867a43a3SLokesh Gidra if (atomic_read(&ctx->mmap_changing))
53160d4d2d2SMike Kravetz goto out_unlock;
5321c9e8defSMike Kravetz }
53360d4d2d2SMike Kravetz
53460d4d2d2SMike Kravetz while (src_addr < src_start + len) {
53560d4d2d2SMike Kravetz BUG_ON(dst_addr >= dst_start + len);
53660d4d2d2SMike Kravetz
53760d4d2d2SMike Kravetz /*
53840549ba8SMike Kravetz * Serialize via vma_lock and hugetlb_fault_mutex.
53940549ba8SMike Kravetz * vma_lock ensures the dst_pte remains valid even
54040549ba8SMike Kravetz * in the case of shared pmds. fault mutex prevents
54140549ba8SMike Kravetz * races with other faulting threads.
54260d4d2d2SMike Kravetz */
543c0d0381aSMike Kravetz idx = linear_page_index(dst_vma, dst_addr);
5443a47c54fSMike Kravetz mapping = dst_vma->vm_file->f_mapping;
545188b04a7SWei Yang hash = hugetlb_fault_mutex_hash(mapping, idx);
54660d4d2d2SMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]);
54740549ba8SMike Kravetz hugetlb_vma_lock_read(dst_vma);
54860d4d2d2SMike Kravetz
54960d4d2d2SMike Kravetz err = -ENOMEM;
550aec44e0fSPeter Xu dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
55160d4d2d2SMike Kravetz if (!dst_pte) {
55240549ba8SMike Kravetz hugetlb_vma_unlock_read(dst_vma);
55360d4d2d2SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]);
55460d4d2d2SMike Kravetz goto out_unlock;
55560d4d2d2SMike Kravetz }
55660d4d2d2SMike Kravetz
557d9712937SAxel Rasmussen if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
5586041c691SPeter Xu !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
55960d4d2d2SMike Kravetz err = -EEXIST;
56040549ba8SMike Kravetz hugetlb_vma_unlock_read(dst_vma);
56160d4d2d2SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]);
56260d4d2d2SMike Kravetz goto out_unlock;
56360d4d2d2SMike Kravetz }
56460d4d2d2SMike Kravetz
565d9712937SAxel Rasmussen err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
5660169fd51SZhangPeng src_addr, flags, &folio);
56760d4d2d2SMike Kravetz
56840549ba8SMike Kravetz hugetlb_vma_unlock_read(dst_vma);
56960d4d2d2SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]);
57060d4d2d2SMike Kravetz
57160d4d2d2SMike Kravetz cond_resched();
57260d4d2d2SMike Kravetz
5739e368259SAndrea Arcangeli if (unlikely(err == -ENOENT)) {
5745e4c24a5SLokesh Gidra up_read(&ctx->map_changing_lock);
575867a43a3SLokesh Gidra uffd_mfill_unlock(dst_vma);
5760169fd51SZhangPeng BUG_ON(!folio);
57760d4d2d2SMike Kravetz
5780169fd51SZhangPeng err = copy_folio_from_user(folio,
579e87340caSZhangPeng (const void __user *)src_addr, true);
58060d4d2d2SMike Kravetz if (unlikely(err)) {
58160d4d2d2SMike Kravetz err = -EFAULT;
58260d4d2d2SMike Kravetz goto out;
58360d4d2d2SMike Kravetz }
58460d4d2d2SMike Kravetz
58560d4d2d2SMike Kravetz dst_vma = NULL;
58660d4d2d2SMike Kravetz goto retry;
58760d4d2d2SMike Kravetz } else
5880169fd51SZhangPeng BUG_ON(folio);
58960d4d2d2SMike Kravetz
59060d4d2d2SMike Kravetz if (!err) {
59160d4d2d2SMike Kravetz dst_addr += vma_hpagesize;
59260d4d2d2SMike Kravetz src_addr += vma_hpagesize;
59360d4d2d2SMike Kravetz copied += vma_hpagesize;
59460d4d2d2SMike Kravetz
59560d4d2d2SMike Kravetz if (fatal_signal_pending(current))
59660d4d2d2SMike Kravetz err = -EINTR;
59760d4d2d2SMike Kravetz }
59860d4d2d2SMike Kravetz if (err)
59960d4d2d2SMike Kravetz break;
60060d4d2d2SMike Kravetz }
60160d4d2d2SMike Kravetz
60260d4d2d2SMike Kravetz out_unlock:
6035e4c24a5SLokesh Gidra up_read(&ctx->map_changing_lock);
604867a43a3SLokesh Gidra out_unlock_vma:
605867a43a3SLokesh Gidra uffd_mfill_unlock(dst_vma);
60660d4d2d2SMike Kravetz out:
6070169fd51SZhangPeng if (folio)
6080169fd51SZhangPeng folio_put(folio);
60960d4d2d2SMike Kravetz BUG_ON(copied < 0);
61060d4d2d2SMike Kravetz BUG_ON(err > 0);
61160d4d2d2SMike Kravetz BUG_ON(!copied && !err);
61260d4d2d2SMike Kravetz return copied ? copied : err;
61360d4d2d2SMike Kravetz }
61460d4d2d2SMike Kravetz #else /* !CONFIG_HUGETLB_PAGE */
61560d4d2d2SMike Kravetz /* fail at build time if gcc attempts to use this */
6165e4c24a5SLokesh Gidra extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
6175e4c24a5SLokesh Gidra struct vm_area_struct *dst_vma,
61860d4d2d2SMike Kravetz unsigned long dst_start,
61960d4d2d2SMike Kravetz unsigned long src_start,
62060d4d2d2SMike Kravetz unsigned long len,
621d9712937SAxel Rasmussen uffd_flags_t flags);
62260d4d2d2SMike Kravetz #endif /* CONFIG_HUGETLB_PAGE */
62360d4d2d2SMike Kravetz
62461c50040SAxel Rasmussen static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
6253217d3c7SMike Rapoport struct vm_area_struct *dst_vma,
6263217d3c7SMike Rapoport unsigned long dst_addr,
6273217d3c7SMike Rapoport unsigned long src_addr,
628d9712937SAxel Rasmussen uffd_flags_t flags,
629d7be6d7eSZhangPeng struct folio **foliop)
6303217d3c7SMike Rapoport {
6313217d3c7SMike Rapoport ssize_t err;
6323217d3c7SMike Rapoport
633d9712937SAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
63461c50040SAxel Rasmussen return mfill_atomic_pte_continue(dst_pmd, dst_vma,
635d9712937SAxel Rasmussen dst_addr, flags);
636fc71884aSAxel Rasmussen } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
637fc71884aSAxel Rasmussen return mfill_atomic_pte_poison(dst_pmd, dst_vma,
638fc71884aSAxel Rasmussen dst_addr, flags);
63915313257SAxel Rasmussen }
64015313257SAxel Rasmussen
6415b51072eSAndrea Arcangeli /*
6425b51072eSAndrea Arcangeli * The normal page fault path for a shmem will invoke the
6435b51072eSAndrea Arcangeli * fault, fill the hole in the file and COW it right away. The
6445b51072eSAndrea Arcangeli * result generates plain anonymous memory. So when we are
6455b51072eSAndrea Arcangeli * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
6465b51072eSAndrea Arcangeli * generate anonymous memory directly without actually filling
6475b51072eSAndrea Arcangeli * the hole. For the MAP_PRIVATE case the robustness check
6485b51072eSAndrea Arcangeli * only happens in the pagetable (to verify it's still none)
6495b51072eSAndrea Arcangeli * and not in the radix tree.
6505b51072eSAndrea Arcangeli */
6515b51072eSAndrea Arcangeli if (!(dst_vma->vm_flags & VM_SHARED)) {
652d9712937SAxel Rasmussen if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
65361c50040SAxel Rasmussen err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
654d9712937SAxel Rasmussen dst_addr, src_addr,
655d7be6d7eSZhangPeng flags, foliop);
6563217d3c7SMike Rapoport else
65761c50040SAxel Rasmussen err = mfill_atomic_pte_zeropage(dst_pmd,
6583217d3c7SMike Rapoport dst_vma, dst_addr);
6593217d3c7SMike Rapoport } else {
66061c50040SAxel Rasmussen err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
66115313257SAxel Rasmussen dst_addr, src_addr,
662d7be6d7eSZhangPeng flags, foliop);
6633217d3c7SMike Rapoport }
6643217d3c7SMike Rapoport
6653217d3c7SMike Rapoport return err;
6663217d3c7SMike Rapoport }
6673217d3c7SMike Rapoport
6685e4c24a5SLokesh Gidra static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
669c1a4de99SAndrea Arcangeli unsigned long dst_start,
670c1a4de99SAndrea Arcangeli unsigned long src_start,
671c1a4de99SAndrea Arcangeli unsigned long len,
672d9712937SAxel Rasmussen uffd_flags_t flags)
673c1a4de99SAndrea Arcangeli {
6745e4c24a5SLokesh Gidra struct mm_struct *dst_mm = ctx->mm;
675c1a4de99SAndrea Arcangeli struct vm_area_struct *dst_vma;
676c1a4de99SAndrea Arcangeli ssize_t err;
677c1a4de99SAndrea Arcangeli pmd_t *dst_pmd;
678c1a4de99SAndrea Arcangeli unsigned long src_addr, dst_addr;
679b6ebaedbSAndrea Arcangeli long copied;
680d7be6d7eSZhangPeng struct folio *folio;
681c1a4de99SAndrea Arcangeli
682c1a4de99SAndrea Arcangeli /*
683c1a4de99SAndrea Arcangeli * Sanitize the command parameters:
684c1a4de99SAndrea Arcangeli */
685c1a4de99SAndrea Arcangeli BUG_ON(dst_start & ~PAGE_MASK);
686c1a4de99SAndrea Arcangeli BUG_ON(len & ~PAGE_MASK);
687c1a4de99SAndrea Arcangeli
688c1a4de99SAndrea Arcangeli /* Does the address range wrap, or is the span zero-sized? */
689c1a4de99SAndrea Arcangeli BUG_ON(src_start + len <= src_start);
690c1a4de99SAndrea Arcangeli BUG_ON(dst_start + len <= dst_start);
691c1a4de99SAndrea Arcangeli
692b6ebaedbSAndrea Arcangeli src_addr = src_start;
693b6ebaedbSAndrea Arcangeli dst_addr = dst_start;
694b6ebaedbSAndrea Arcangeli copied = 0;
695d7be6d7eSZhangPeng folio = NULL;
696b6ebaedbSAndrea Arcangeli retry:
697867a43a3SLokesh Gidra /*
698867a43a3SLokesh Gidra * Make sure the vma is not shared, and that the dst range is
699867a43a3SLokesh Gidra * both valid and fully within a single existing vma.
700867a43a3SLokesh Gidra */
701867a43a3SLokesh Gidra dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
702867a43a3SLokesh Gidra if (IS_ERR(dst_vma)) {
703867a43a3SLokesh Gidra err = PTR_ERR(dst_vma);
704867a43a3SLokesh Gidra goto out;
705867a43a3SLokesh Gidra }
706c1a4de99SAndrea Arcangeli
707c1a4de99SAndrea Arcangeli /*
708df2cc96eSMike Rapoport * If memory mappings are changing because of non-cooperative
709df2cc96eSMike Rapoport * operation (e.g. mremap) running in parallel, bail out and
710df2cc96eSMike Rapoport * request the user to retry later
711df2cc96eSMike Rapoport */
7125e4c24a5SLokesh Gidra down_read(&ctx->map_changing_lock);
713df2cc96eSMike Rapoport err = -EAGAIN;
7145e4c24a5SLokesh Gidra if (atomic_read(&ctx->mmap_changing))
71526071cedSMike Rapoport goto out_unlock;
71627d02568SMike Rapoport
71727d02568SMike Rapoport err = -EINVAL;
71827d02568SMike Rapoport /*
71927d02568SMike Rapoport * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
72027d02568SMike Rapoport * it will overwrite vm_ops, so vma_is_anonymous must return false.
72127d02568SMike Rapoport */
72227d02568SMike Rapoport if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
72327d02568SMike Rapoport dst_vma->vm_flags & VM_SHARED))
72427d02568SMike Rapoport goto out_unlock;
72527d02568SMike Rapoport
72627d02568SMike Rapoport /*
72772981e0eSAndrea Arcangeli * validate 'mode' now that we know the dst_vma: don't allow
72872981e0eSAndrea Arcangeli * a wrprotect copy if the userfaultfd didn't register as WP.
72972981e0eSAndrea Arcangeli */
730d9712937SAxel Rasmussen if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
73172981e0eSAndrea Arcangeli goto out_unlock;
73272981e0eSAndrea Arcangeli
73372981e0eSAndrea Arcangeli /*
73427d02568SMike Rapoport * If this is a HUGETLB vma, pass off to appropriate routine
73527d02568SMike Rapoport */
73627d02568SMike Rapoport if (is_vm_hugetlb_page(dst_vma))
7375e4c24a5SLokesh Gidra return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
7385e4c24a5SLokesh Gidra src_start, len, flags);
73927d02568SMike Rapoport
74026071cedSMike Rapoport if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
741b6ebaedbSAndrea Arcangeli goto out_unlock;
742d9712937SAxel Rasmussen if (!vma_is_shmem(dst_vma) &&
743d9712937SAxel Rasmussen uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
744f6191471SAxel Rasmussen goto out_unlock;
745c1a4de99SAndrea Arcangeli
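/*
 * Fill the range one base page at a time: allocate page tables as
 * needed, refuse to operate on huge pmds, and hand each pte off to
 * mfill_atomic_pte().
 */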
746b6ebaedbSAndrea Arcangeli while (src_addr < src_start + len) {
747c1a4de99SAndrea Arcangeli pmd_t dst_pmdval;
748b6ebaedbSAndrea Arcangeli
749c1a4de99SAndrea Arcangeli BUG_ON(dst_addr >= dst_start + len);
750b6ebaedbSAndrea Arcangeli
751c1a4de99SAndrea Arcangeli dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
752c1a4de99SAndrea Arcangeli if (unlikely(!dst_pmd)) {
753c1a4de99SAndrea Arcangeli err = -ENOMEM;
754c1a4de99SAndrea Arcangeli break;
755c1a4de99SAndrea Arcangeli }
756c1a4de99SAndrea Arcangeli
757dab6e717SPeter Zijlstra dst_pmdval = pmdp_get_lockless(dst_pmd);
758c1a4de99SAndrea Arcangeli /*
759c1a4de99SAndrea Arcangeli * If the dst_pmd is mapped as THP don't
760c1a4de99SAndrea Arcangeli * override it and just be strict.
761c1a4de99SAndrea Arcangeli */
762c1a4de99SAndrea Arcangeli if (unlikely(pmd_trans_huge(dst_pmdval))) {
763c1a4de99SAndrea Arcangeli err = -EEXIST;
764c1a4de99SAndrea Arcangeli break;
765c1a4de99SAndrea Arcangeli }
766c1a4de99SAndrea Arcangeli if (unlikely(pmd_none(dst_pmdval)) &&
7674cf58924SJoel Fernandes (Google) unlikely(__pte_alloc(dst_mm, dst_pmd))) {
768c1a4de99SAndrea Arcangeli err = -ENOMEM;
769c1a4de99SAndrea Arcangeli break;
770c1a4de99SAndrea Arcangeli }
771c1a4de99SAndrea Arcangeli /* If a huge pmd materialized from under us, fail */
772c1a4de99SAndrea Arcangeli if (unlikely(pmd_trans_huge(*dst_pmd))) {
773c1a4de99SAndrea Arcangeli err = -EFAULT;
774c1a4de99SAndrea Arcangeli break;
775c1a4de99SAndrea Arcangeli }
776c1a4de99SAndrea Arcangeli
777c1a4de99SAndrea Arcangeli BUG_ON(pmd_none(*dst_pmd));
778c1a4de99SAndrea Arcangeli BUG_ON(pmd_trans_huge(*dst_pmd));
779c1a4de99SAndrea Arcangeli
78061c50040SAxel Rasmussen err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
781d7be6d7eSZhangPeng src_addr, flags, &folio);
782c1a4de99SAndrea Arcangeli cond_resched();
783c1a4de99SAndrea Arcangeli
7849e368259SAndrea Arcangeli if (unlikely(err == -ENOENT)) {
785d7be6d7eSZhangPeng void *kaddr;
786b6ebaedbSAndrea Arcangeli
7875e4c24a5SLokesh Gidra up_read(&ctx->map_changing_lock);
788867a43a3SLokesh Gidra uffd_mfill_unlock(dst_vma);
789d7be6d7eSZhangPeng BUG_ON(!folio);
790b6ebaedbSAndrea Arcangeli
791d7be6d7eSZhangPeng kaddr = kmap_local_folio(folio, 0);
792d7be6d7eSZhangPeng err = copy_from_user(kaddr,
793b6ebaedbSAndrea Arcangeli (const void __user *) src_addr,
794b6ebaedbSAndrea Arcangeli PAGE_SIZE);
795d7be6d7eSZhangPeng kunmap_local(kaddr);
796b6ebaedbSAndrea Arcangeli if (unlikely(err)) {
797b6ebaedbSAndrea Arcangeli err = -EFAULT;
798b6ebaedbSAndrea Arcangeli goto out;
799b6ebaedbSAndrea Arcangeli }
800d7be6d7eSZhangPeng flush_dcache_folio(folio);
801b6ebaedbSAndrea Arcangeli goto retry;
802b6ebaedbSAndrea Arcangeli } else
803d7be6d7eSZhangPeng BUG_ON(folio);
804b6ebaedbSAndrea Arcangeli
805c1a4de99SAndrea Arcangeli if (!err) {
806c1a4de99SAndrea Arcangeli dst_addr += PAGE_SIZE;
807c1a4de99SAndrea Arcangeli src_addr += PAGE_SIZE;
808c1a4de99SAndrea Arcangeli copied += PAGE_SIZE;
809c1a4de99SAndrea Arcangeli
810c1a4de99SAndrea Arcangeli if (fatal_signal_pending(current))
811c1a4de99SAndrea Arcangeli err = -EINTR;
812c1a4de99SAndrea Arcangeli }
813c1a4de99SAndrea Arcangeli if (err)
814c1a4de99SAndrea Arcangeli break;
815c1a4de99SAndrea Arcangeli }
816c1a4de99SAndrea Arcangeli
817b6ebaedbSAndrea Arcangeli out_unlock:
8185e4c24a5SLokesh Gidra up_read(&ctx->map_changing_lock);
819867a43a3SLokesh Gidra uffd_mfill_unlock(dst_vma);
820b6ebaedbSAndrea Arcangeli out:
821d7be6d7eSZhangPeng if (folio)
822d7be6d7eSZhangPeng folio_put(folio);
823c1a4de99SAndrea Arcangeli BUG_ON(copied < 0);
824c1a4de99SAndrea Arcangeli BUG_ON(err > 0);
825c1a4de99SAndrea Arcangeli BUG_ON(!copied && !err);
826c1a4de99SAndrea Arcangeli return copied ? copied : err;
827c1a4de99SAndrea Arcangeli }
828c1a4de99SAndrea Arcangeli
8295e4c24a5SLokesh Gidra ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
830df2cc96eSMike Rapoport unsigned long src_start, unsigned long len,
8315e4c24a5SLokesh Gidra uffd_flags_t flags)
832c1a4de99SAndrea Arcangeli {
8335e4c24a5SLokesh Gidra return mfill_atomic(ctx, dst_start, src_start, len,
834d9712937SAxel Rasmussen uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
835c1a4de99SAndrea Arcangeli }
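/*
 * Illustrative sketch (not part of this file) of the userspace side that
 * normally reaches mfill_atomic_copy(): the uffd monitor thread resolves a
 * missing-page fault with UFFDIO_COPY.  fault_addr, src_buf and page_size
 * are placeholder names.
 *
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(page_size - 1),
 *		.src  = (unsigned long)src_buf,
 *		.len  = page_size,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		/* handle the error, e.g. retry on EAGAIN */;
 */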
836c1a4de99SAndrea Arcangeli
8375e4c24a5SLokesh Gidra ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
8385e4c24a5SLokesh Gidra unsigned long start,
8395e4c24a5SLokesh Gidra unsigned long len)
840c1a4de99SAndrea Arcangeli {
8415e4c24a5SLokesh Gidra return mfill_atomic(ctx, start, 0, len,
842d9712937SAxel Rasmussen uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
843f6191471SAxel Rasmussen }
844f6191471SAxel Rasmussen
8455e4c24a5SLokesh Gidra ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
8465e4c24a5SLokesh Gidra unsigned long len, uffd_flags_t flags)
847f6191471SAxel Rasmussen {
848b14d1671SJames Houghton
849b14d1671SJames Houghton /*
850b14d1671SJames Houghton * A caller might reasonably assume that UFFDIO_CONTINUE contains an
851b14d1671SJames Houghton * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
852b14d1671SJames Houghton * the thread doing the UFFDIO_CONTINUE are guaranteed to be visible to
853b14d1671SJames Houghton * subsequent loads from the page through the newly mapped address range.
854b14d1671SJames Houghton */
855b14d1671SJames Houghton smp_wmb();
856b14d1671SJames Houghton
8575e4c24a5SLokesh Gidra return mfill_atomic(ctx, start, 0, len,
85802891844SAxel Rasmussen uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
859c1a4de99SAndrea Arcangeli }
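/*
 * Illustrative sketch (not part of this file) of the userspace counterpart
 * for minor faults: the monitor populates the page through its own mapping
 * of the shmem/hugetlb file first, then issues UFFDIO_CONTINUE so the
 * faulting range maps the existing page cache page.  Names are examples
 * only.
 *
 *	struct uffdio_continue cont = {
 *		.range = { .start = fault_addr, .len = page_size },
 *		.mode  = 0,
 *	};
 *	ioctl(uffd, UFFDIO_CONTINUE, &cont);
 */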
860ffd05793SShaohua Li
8615e4c24a5SLokesh Gidra ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
8625e4c24a5SLokesh Gidra unsigned long len, uffd_flags_t flags)
863fc71884aSAxel Rasmussen {
8645e4c24a5SLokesh Gidra return mfill_atomic(ctx, start, 0, len,
865fc71884aSAxel Rasmussen uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
866fc71884aSAxel Rasmussen }
867fc71884aSAxel Rasmussen
86861c50040SAxel Rasmussen long uffd_wp_range(struct vm_area_struct *dst_vma,
869f369b07cSPeter Xu unsigned long start, unsigned long len, bool enable_wp)
870f369b07cSPeter Xu {
871931298e1SDavid Hildenbrand unsigned int mm_cp_flags;
872f369b07cSPeter Xu struct mmu_gather tlb;
873d1751118SPeter Xu long ret;
874f369b07cSPeter Xu
875a1b92a3fSMuhammad Usama Anjum VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
876a1b92a3fSMuhammad Usama Anjum "The address range exceeds VMA boundary.\n");
877f369b07cSPeter Xu if (enable_wp)
878931298e1SDavid Hildenbrand mm_cp_flags = MM_CP_UFFD_WP;
879f369b07cSPeter Xu else
880931298e1SDavid Hildenbrand mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;
881f369b07cSPeter Xu
882931298e1SDavid Hildenbrand /*
883931298e1SDavid Hildenbrand * vma->vm_page_prot already reflects that uffd-wp is enabled for this
884931298e1SDavid Hildenbrand * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
885931298e1SDavid Hildenbrand * to be write-protected as default whenever protection changes.
886931298e1SDavid Hildenbrand * Try upgrading write permissions manually.
887931298e1SDavid Hildenbrand */
888931298e1SDavid Hildenbrand if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
889931298e1SDavid Hildenbrand mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
89061c50040SAxel Rasmussen tlb_gather_mmu(&tlb, dst_vma->vm_mm);
891d1751118SPeter Xu ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
892f369b07cSPeter Xu tlb_finish_mmu(&tlb);
893d1751118SPeter Xu
894d1751118SPeter Xu return ret;
895f369b07cSPeter Xu }
896f369b07cSPeter Xu
8975e4c24a5SLokesh Gidra int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
8985e4c24a5SLokesh Gidra unsigned long len, bool enable_wp)
899ffd05793SShaohua Li {
9005e4c24a5SLokesh Gidra struct mm_struct *dst_mm = ctx->mm;
901a1b92a3fSMuhammad Usama Anjum unsigned long end = start + len;
902a1b92a3fSMuhammad Usama Anjum unsigned long _start, _end;
903ffd05793SShaohua Li struct vm_area_struct *dst_vma;
9045a90d5a1SPeter Xu unsigned long page_mask;
905d1751118SPeter Xu long err;
906a1b92a3fSMuhammad Usama Anjum VMA_ITERATOR(vmi, dst_mm, start);
907ffd05793SShaohua Li
908ffd05793SShaohua Li /*
909ffd05793SShaohua Li * Sanitize the command parameters:
910ffd05793SShaohua Li */
911ffd05793SShaohua Li BUG_ON(start & ~PAGE_MASK);
912ffd05793SShaohua Li BUG_ON(len & ~PAGE_MASK);
913ffd05793SShaohua Li
914ffd05793SShaohua Li /* Does the address range wrap, or is the span zero-sized? */
915ffd05793SShaohua Li BUG_ON(start + len <= start);
916ffd05793SShaohua Li
917d8ed45c5SMichel Lespinasse mmap_read_lock(dst_mm);
918ffd05793SShaohua Li
919ffd05793SShaohua Li /*
920ffd05793SShaohua Li * If memory mappings are changing because of non-cooperative
921ffd05793SShaohua Li * operation (e.g. mremap) running in parallel, bail out and
922ffd05793SShaohua Li * request the user to retry later
923ffd05793SShaohua Li */
9245e4c24a5SLokesh Gidra down_read(&ctx->map_changing_lock);
925ffd05793SShaohua Li err = -EAGAIN;
9265e4c24a5SLokesh Gidra if (atomic_read(&ctx->mmap_changing))
927ffd05793SShaohua Li goto out_unlock;
928ffd05793SShaohua Li
929ffd05793SShaohua Li err = -ENOENT;
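/*
 * Walk every VMA overlapping [start, end): each one must be registered
 * for uffd-wp, and hugetlb VMAs additionally require start/len to be
 * aligned to the huge page size.
 */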
930a1b92a3fSMuhammad Usama Anjum for_each_vma_range(vmi, dst_vma, end) {
931b1f9e876SPeter Xu
932a1b92a3fSMuhammad Usama Anjum if (!userfaultfd_wp(dst_vma)) {
933a1b92a3fSMuhammad Usama Anjum err = -ENOENT;
934a1b92a3fSMuhammad Usama Anjum break;
935a1b92a3fSMuhammad Usama Anjum }
936ffd05793SShaohua Li
9375a90d5a1SPeter Xu if (is_vm_hugetlb_page(dst_vma)) {
9385a90d5a1SPeter Xu err = -EINVAL;
9395a90d5a1SPeter Xu page_mask = vma_kernel_pagesize(dst_vma) - 1;
9405a90d5a1SPeter Xu if ((start & page_mask) || (len & page_mask))
941a1b92a3fSMuhammad Usama Anjum break;
9425a90d5a1SPeter Xu }
9435a90d5a1SPeter Xu
944a1b92a3fSMuhammad Usama Anjum _start = max(dst_vma->vm_start, start);
945a1b92a3fSMuhammad Usama Anjum _end = min(dst_vma->vm_end, end);
946a1b92a3fSMuhammad Usama Anjum
94761c50040SAxel Rasmussen err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
948ffd05793SShaohua Li
949d1751118SPeter Xu /* Return 0 on success, <0 on failures */
950a1b92a3fSMuhammad Usama Anjum if (err < 0)
951a1b92a3fSMuhammad Usama Anjum break;
952ffd05793SShaohua Li err = 0;
953a1b92a3fSMuhammad Usama Anjum }
954ffd05793SShaohua Li out_unlock:
9555e4c24a5SLokesh Gidra up_read(&ctx->map_changing_lock);
956d8ed45c5SMichel Lespinasse mmap_read_unlock(dst_mm);
957ffd05793SShaohua Li return err;
958ffd05793SShaohua Li }
959adef4406SAndrea Arcangeli
960adef4406SAndrea Arcangeli
961adef4406SAndrea Arcangeli void double_pt_lock(spinlock_t *ptl1,
962adef4406SAndrea Arcangeli spinlock_t *ptl2)
963adef4406SAndrea Arcangeli __acquires(ptl1)
964adef4406SAndrea Arcangeli __acquires(ptl2)
965adef4406SAndrea Arcangeli {
966adef4406SAndrea Arcangeli spinlock_t *ptl_tmp;
967adef4406SAndrea Arcangeli
968adef4406SAndrea Arcangeli if (ptl1 > ptl2) {
969adef4406SAndrea Arcangeli /* exchange ptl1 and ptl2 */
970adef4406SAndrea Arcangeli ptl_tmp = ptl1;
971adef4406SAndrea Arcangeli ptl1 = ptl2;
972adef4406SAndrea Arcangeli ptl2 = ptl_tmp;
973adef4406SAndrea Arcangeli }
974adef4406SAndrea Arcangeli /* lock in virtual address order to avoid lock inversion */
975adef4406SAndrea Arcangeli spin_lock(ptl1);
976adef4406SAndrea Arcangeli if (ptl1 != ptl2)
977adef4406SAndrea Arcangeli spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
978adef4406SAndrea Arcangeli else
979adef4406SAndrea Arcangeli __acquire(ptl2);
980adef4406SAndrea Arcangeli }
981adef4406SAndrea Arcangeli
982adef4406SAndrea Arcangeli void double_pt_unlock(spinlock_t *ptl1,
983adef4406SAndrea Arcangeli spinlock_t *ptl2)
984adef4406SAndrea Arcangeli __releases(ptl1)
985adef4406SAndrea Arcangeli __releases(ptl2)
986adef4406SAndrea Arcangeli {
987adef4406SAndrea Arcangeli spin_unlock(ptl1);
988adef4406SAndrea Arcangeli if (ptl1 != ptl2)
989adef4406SAndrea Arcangeli spin_unlock(ptl2);
990adef4406SAndrea Arcangeli else
991adef4406SAndrea Arcangeli __release(ptl2);
992adef4406SAndrea Arcangeli }
993adef4406SAndrea Arcangeli
994adef4406SAndrea Arcangeli
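/*
 * The helpers below implement UFFDIO_MOVE: instead of copying data they
 * transplant an existing anonymous page (or swap/zero-page entry) from
 * the source address to the destination address.
 */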
995adef4406SAndrea Arcangeli static int move_present_pte(struct mm_struct *mm,
996adef4406SAndrea Arcangeli struct vm_area_struct *dst_vma,
997adef4406SAndrea Arcangeli struct vm_area_struct *src_vma,
998adef4406SAndrea Arcangeli unsigned long dst_addr, unsigned long src_addr,
999adef4406SAndrea Arcangeli pte_t *dst_pte, pte_t *src_pte,
1000adef4406SAndrea Arcangeli pte_t orig_dst_pte, pte_t orig_src_pte,
1001adef4406SAndrea Arcangeli spinlock_t *dst_ptl, spinlock_t *src_ptl,
1002adef4406SAndrea Arcangeli struct folio *src_folio)
1003adef4406SAndrea Arcangeli {
1004adef4406SAndrea Arcangeli int err = 0;
1005adef4406SAndrea Arcangeli
1006adef4406SAndrea Arcangeli double_pt_lock(dst_ptl, src_ptl);
1007adef4406SAndrea Arcangeli
100856ae10cfSRyan Roberts if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
100956ae10cfSRyan Roberts !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
1010adef4406SAndrea Arcangeli err = -EAGAIN;
1011adef4406SAndrea Arcangeli goto out;
1012adef4406SAndrea Arcangeli }
1013adef4406SAndrea Arcangeli if (folio_test_large(src_folio) ||
1014adef4406SAndrea Arcangeli folio_maybe_dma_pinned(src_folio) ||
1015adef4406SAndrea Arcangeli !PageAnonExclusive(&src_folio->page)) {
1016adef4406SAndrea Arcangeli err = -EBUSY;
1017adef4406SAndrea Arcangeli goto out;
1018adef4406SAndrea Arcangeli }
1019adef4406SAndrea Arcangeli
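/*
 * Clear the source pte (with a TLB flush) first, so GUP-fast cannot
 * take a new pin on the folio while it is being moved; the pin check
 * is repeated below to close that race.
 */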
1020adef4406SAndrea Arcangeli orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
1021adef4406SAndrea Arcangeli /* Folio got pinned from under us. Put it back and fail the move. */
1022adef4406SAndrea Arcangeli if (folio_maybe_dma_pinned(src_folio)) {
1023adef4406SAndrea Arcangeli set_pte_at(mm, src_addr, src_pte, orig_src_pte);
1024adef4406SAndrea Arcangeli err = -EBUSY;
1025adef4406SAndrea Arcangeli goto out;
1026adef4406SAndrea Arcangeli }
1027adef4406SAndrea Arcangeli
1028d7a08838SQi Zheng folio_move_anon_rmap(src_folio, dst_vma);
1029d7a08838SQi Zheng WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
1030d7a08838SQi Zheng
1031adef4406SAndrea Arcangeli orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
1032adef4406SAndrea Arcangeli /* Follow mremap() behavior and treat the entry dirty after the move */
1033adef4406SAndrea Arcangeli orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
1034adef4406SAndrea Arcangeli
1035adef4406SAndrea Arcangeli set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
1036adef4406SAndrea Arcangeli out:
1037adef4406SAndrea Arcangeli double_pt_unlock(dst_ptl, src_ptl);
1038adef4406SAndrea Arcangeli return err;
1039adef4406SAndrea Arcangeli }
1040adef4406SAndrea Arcangeli
1041adef4406SAndrea Arcangeli static int move_swap_pte(struct mm_struct *mm,
1042adef4406SAndrea Arcangeli unsigned long dst_addr, unsigned long src_addr,
1043adef4406SAndrea Arcangeli pte_t *dst_pte, pte_t *src_pte,
1044adef4406SAndrea Arcangeli pte_t orig_dst_pte, pte_t orig_src_pte,
1045adef4406SAndrea Arcangeli spinlock_t *dst_ptl, spinlock_t *src_ptl)
1046adef4406SAndrea Arcangeli {
1047adef4406SAndrea Arcangeli if (!pte_swp_exclusive(orig_src_pte))
1048adef4406SAndrea Arcangeli return -EBUSY;
1049adef4406SAndrea Arcangeli
1050adef4406SAndrea Arcangeli double_pt_lock(dst_ptl, src_ptl);
1051adef4406SAndrea Arcangeli
105256ae10cfSRyan Roberts if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
105356ae10cfSRyan Roberts !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
1054adef4406SAndrea Arcangeli double_pt_unlock(dst_ptl, src_ptl);
1055adef4406SAndrea Arcangeli return -EAGAIN;
1056adef4406SAndrea Arcangeli }
1057adef4406SAndrea Arcangeli
1058adef4406SAndrea Arcangeli orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
1059adef4406SAndrea Arcangeli set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
1060adef4406SAndrea Arcangeli double_pt_unlock(dst_ptl, src_ptl);
1061adef4406SAndrea Arcangeli
1062adef4406SAndrea Arcangeli return 0;
1063adef4406SAndrea Arcangeli }
1064adef4406SAndrea Arcangeli
1065eb1521daSSuren Baghdasaryan static int move_zeropage_pte(struct mm_struct *mm,
1066eb1521daSSuren Baghdasaryan struct vm_area_struct *dst_vma,
1067eb1521daSSuren Baghdasaryan struct vm_area_struct *src_vma,
1068eb1521daSSuren Baghdasaryan unsigned long dst_addr, unsigned long src_addr,
1069eb1521daSSuren Baghdasaryan pte_t *dst_pte, pte_t *src_pte,
1070eb1521daSSuren Baghdasaryan pte_t orig_dst_pte, pte_t orig_src_pte,
1071eb1521daSSuren Baghdasaryan spinlock_t *dst_ptl, spinlock_t *src_ptl)
1072eb1521daSSuren Baghdasaryan {
1073eb1521daSSuren Baghdasaryan pte_t zero_pte;
1074eb1521daSSuren Baghdasaryan
1075eb1521daSSuren Baghdasaryan double_pt_lock(dst_ptl, src_ptl);
1076eb1521daSSuren Baghdasaryan if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
1077eb1521daSSuren Baghdasaryan !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
1078eb1521daSSuren Baghdasaryan double_pt_unlock(dst_ptl, src_ptl);
1079eb1521daSSuren Baghdasaryan return -EAGAIN;
1080eb1521daSSuren Baghdasaryan }
1081eb1521daSSuren Baghdasaryan
1082eb1521daSSuren Baghdasaryan zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
1083eb1521daSSuren Baghdasaryan dst_vma->vm_page_prot));
1084eb1521daSSuren Baghdasaryan ptep_clear_flush(src_vma, src_addr, src_pte);
1085eb1521daSSuren Baghdasaryan set_pte_at(mm, dst_addr, dst_pte, zero_pte);
1086eb1521daSSuren Baghdasaryan double_pt_unlock(dst_ptl, src_ptl);
1087eb1521daSSuren Baghdasaryan
1088eb1521daSSuren Baghdasaryan return 0;
1089eb1521daSSuren Baghdasaryan }
1090eb1521daSSuren Baghdasaryan
1091eb1521daSSuren Baghdasaryan
1092adef4406SAndrea Arcangeli /*
1093adef4406SAndrea Arcangeli * The mmap_lock for reading is held by the caller. Just move the page
1094adef4406SAndrea Arcangeli * from src_pmd to dst_pmd if possible, and return 0 if it succeeded
1095adef4406SAndrea Arcangeli * in moving the page.
1096adef4406SAndrea Arcangeli */
1097adef4406SAndrea Arcangeli static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
1098adef4406SAndrea Arcangeli struct vm_area_struct *dst_vma,
1099adef4406SAndrea Arcangeli struct vm_area_struct *src_vma,
1100adef4406SAndrea Arcangeli unsigned long dst_addr, unsigned long src_addr,
1101adef4406SAndrea Arcangeli __u64 mode)
1102adef4406SAndrea Arcangeli {
1103adef4406SAndrea Arcangeli swp_entry_t entry;
1104adef4406SAndrea Arcangeli pte_t orig_src_pte, orig_dst_pte;
1105adef4406SAndrea Arcangeli pte_t src_folio_pte;
1106adef4406SAndrea Arcangeli spinlock_t *src_ptl, *dst_ptl;
1107adef4406SAndrea Arcangeli pte_t *src_pte = NULL;
1108adef4406SAndrea Arcangeli pte_t *dst_pte = NULL;
1109adef4406SAndrea Arcangeli
1110adef4406SAndrea Arcangeli struct folio *src_folio = NULL;
1111adef4406SAndrea Arcangeli struct anon_vma *src_anon_vma = NULL;
1112adef4406SAndrea Arcangeli struct mmu_notifier_range range;
1113adef4406SAndrea Arcangeli int err = 0;
1114adef4406SAndrea Arcangeli
1115adef4406SAndrea Arcangeli flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
1116adef4406SAndrea Arcangeli mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1117adef4406SAndrea Arcangeli src_addr, src_addr + PAGE_SIZE);
1118adef4406SAndrea Arcangeli mmu_notifier_invalidate_range_start(&range);
1119adef4406SAndrea Arcangeli retry:
1120adef4406SAndrea Arcangeli dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);
1121adef4406SAndrea Arcangeli
1122adef4406SAndrea Arcangeli /* Retry if a huge pmd materialized from under us */
1123adef4406SAndrea Arcangeli if (unlikely(!dst_pte)) {
1124adef4406SAndrea Arcangeli err = -EAGAIN;
1125adef4406SAndrea Arcangeli goto out;
1126adef4406SAndrea Arcangeli }
1127adef4406SAndrea Arcangeli
1128adef4406SAndrea Arcangeli src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);
1129adef4406SAndrea Arcangeli
1130adef4406SAndrea Arcangeli /*
1131adef4406SAndrea Arcangeli * We held the mmap_lock for reading so MADV_DONTNEED
1132adef4406SAndrea Arcangeli * can zap transparent huge pages under us, or the
1133adef4406SAndrea Arcangeli * transparent huge page fault can establish new
1134adef4406SAndrea Arcangeli * transparent huge pages under us.
1135adef4406SAndrea Arcangeli */
1136adef4406SAndrea Arcangeli if (unlikely(!src_pte)) {
1137adef4406SAndrea Arcangeli err = -EAGAIN;
1138adef4406SAndrea Arcangeli goto out;
1139adef4406SAndrea Arcangeli }
1140adef4406SAndrea Arcangeli
1141adef4406SAndrea Arcangeli /* Sanity checks before the operation */
1142adef4406SAndrea Arcangeli if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
1143adef4406SAndrea Arcangeli WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
1144adef4406SAndrea Arcangeli err = -EINVAL;
1145adef4406SAndrea Arcangeli goto out;
1146adef4406SAndrea Arcangeli }
1147adef4406SAndrea Arcangeli
1148adef4406SAndrea Arcangeli spin_lock(dst_ptl);
114956ae10cfSRyan Roberts orig_dst_pte = ptep_get(dst_pte);
1150adef4406SAndrea Arcangeli spin_unlock(dst_ptl);
1151adef4406SAndrea Arcangeli if (!pte_none(orig_dst_pte)) {
1152adef4406SAndrea Arcangeli err = -EEXIST;
1153adef4406SAndrea Arcangeli goto out;
1154adef4406SAndrea Arcangeli }
1155adef4406SAndrea Arcangeli
1156adef4406SAndrea Arcangeli spin_lock(src_ptl);
115756ae10cfSRyan Roberts orig_src_pte = ptep_get(src_pte);
1158adef4406SAndrea Arcangeli spin_unlock(src_ptl);
1159adef4406SAndrea Arcangeli if (pte_none(orig_src_pte)) {
1160adef4406SAndrea Arcangeli if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
1161adef4406SAndrea Arcangeli err = -ENOENT;
1162adef4406SAndrea Arcangeli else /* nothing to do to move a hole */
1163adef4406SAndrea Arcangeli err = 0;
1164adef4406SAndrea Arcangeli goto out;
1165adef4406SAndrea Arcangeli }
1166adef4406SAndrea Arcangeli
1167adef4406SAndrea Arcangeli /* If the PTE changed after we locked the folio, start over */
1168adef4406SAndrea Arcangeli if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
1169adef4406SAndrea Arcangeli err = -EAGAIN;
1170adef4406SAndrea Arcangeli goto out;
1171adef4406SAndrea Arcangeli }
1172adef4406SAndrea Arcangeli
1173adef4406SAndrea Arcangeli if (pte_present(orig_src_pte)) {
1174eb1521daSSuren Baghdasaryan if (is_zero_pfn(pte_pfn(orig_src_pte))) {
1175eb1521daSSuren Baghdasaryan err = move_zeropage_pte(mm, dst_vma, src_vma,
1176eb1521daSSuren Baghdasaryan dst_addr, src_addr, dst_pte, src_pte,
1177eb1521daSSuren Baghdasaryan orig_dst_pte, orig_src_pte,
1178eb1521daSSuren Baghdasaryan dst_ptl, src_ptl);
1179eb1521daSSuren Baghdasaryan goto out;
1180eb1521daSSuren Baghdasaryan }
1181eb1521daSSuren Baghdasaryan
1182adef4406SAndrea Arcangeli /*
1183adef4406SAndrea Arcangeli * Pin and lock both source folio and anon_vma. Since we are in
1184adef4406SAndrea Arcangeli * RCU read section, we can't block, so on contention have to
1185adef4406SAndrea Arcangeli * unmap the ptes, obtain the lock and retry.
1186adef4406SAndrea Arcangeli */
1187adef4406SAndrea Arcangeli if (!src_folio) {
1188adef4406SAndrea Arcangeli struct folio *folio;
1189adef4406SAndrea Arcangeli
1190adef4406SAndrea Arcangeli /*
1191adef4406SAndrea Arcangeli * Pin the page while holding the lock to be sure the
1192adef4406SAndrea Arcangeli * page isn't freed under us
1193adef4406SAndrea Arcangeli */
1194adef4406SAndrea Arcangeli spin_lock(src_ptl);
119556ae10cfSRyan Roberts if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
1196adef4406SAndrea Arcangeli spin_unlock(src_ptl);
1197adef4406SAndrea Arcangeli err = -EAGAIN;
1198adef4406SAndrea Arcangeli goto out;
1199adef4406SAndrea Arcangeli }
1200adef4406SAndrea Arcangeli
1201adef4406SAndrea Arcangeli folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
1202adef4406SAndrea Arcangeli if (!folio || !PageAnonExclusive(&folio->page)) {
1203adef4406SAndrea Arcangeli spin_unlock(src_ptl);
1204adef4406SAndrea Arcangeli err = -EBUSY;
1205adef4406SAndrea Arcangeli goto out;
1206adef4406SAndrea Arcangeli }
1207adef4406SAndrea Arcangeli
1208adef4406SAndrea Arcangeli folio_get(folio);
1209adef4406SAndrea Arcangeli src_folio = folio;
1210adef4406SAndrea Arcangeli src_folio_pte = orig_src_pte;
1211adef4406SAndrea Arcangeli spin_unlock(src_ptl);
1212adef4406SAndrea Arcangeli
1213adef4406SAndrea Arcangeli if (!folio_trylock(src_folio)) {
1214adef4406SAndrea Arcangeli pte_unmap(&orig_src_pte);
1215adef4406SAndrea Arcangeli pte_unmap(&orig_dst_pte);
1216adef4406SAndrea Arcangeli src_pte = dst_pte = NULL;
1217adef4406SAndrea Arcangeli /* now we can block and wait */
1218adef4406SAndrea Arcangeli folio_lock(src_folio);
1219adef4406SAndrea Arcangeli goto retry;
1220adef4406SAndrea Arcangeli }
1221adef4406SAndrea Arcangeli
1222adef4406SAndrea Arcangeli if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
1223adef4406SAndrea Arcangeli err = -EBUSY;
1224adef4406SAndrea Arcangeli goto out;
1225adef4406SAndrea Arcangeli }
1226adef4406SAndrea Arcangeli }
1227adef4406SAndrea Arcangeli
1228adef4406SAndrea Arcangeli /* at this point we have src_folio locked */
1229adef4406SAndrea Arcangeli if (folio_test_large(src_folio)) {
1230982ae058SSuren Baghdasaryan /* split_folio() can block */
1231982ae058SSuren Baghdasaryan pte_unmap(&orig_src_pte);
1232982ae058SSuren Baghdasaryan pte_unmap(&orig_dst_pte);
1233982ae058SSuren Baghdasaryan src_pte = dst_pte = NULL;
1234adef4406SAndrea Arcangeli err = split_folio(src_folio);
1235adef4406SAndrea Arcangeli if (err)
1236adef4406SAndrea Arcangeli goto out;
1237982ae058SSuren Baghdasaryan /* have to reacquire the folio after it got split */
1238982ae058SSuren Baghdasaryan folio_unlock(src_folio);
1239982ae058SSuren Baghdasaryan folio_put(src_folio);
1240982ae058SSuren Baghdasaryan src_folio = NULL;
1241982ae058SSuren Baghdasaryan goto retry;
1242adef4406SAndrea Arcangeli }
1243adef4406SAndrea Arcangeli
1244adef4406SAndrea Arcangeli if (!src_anon_vma) {
1245adef4406SAndrea Arcangeli /*
1246adef4406SAndrea Arcangeli * folio_referenced walks the anon_vma chain
1247adef4406SAndrea Arcangeli * without the folio lock. Serialize against it with
1248adef4406SAndrea Arcangeli * the anon_vma lock, the folio lock is not enough.
1249adef4406SAndrea Arcangeli */
1250adef4406SAndrea Arcangeli src_anon_vma = folio_get_anon_vma(src_folio);
1251adef4406SAndrea Arcangeli if (!src_anon_vma) {
1252adef4406SAndrea Arcangeli /* page was unmapped from under us */
1253adef4406SAndrea Arcangeli err = -EAGAIN;
1254adef4406SAndrea Arcangeli goto out;
1255adef4406SAndrea Arcangeli }
1256adef4406SAndrea Arcangeli if (!anon_vma_trylock_write(src_anon_vma)) {
1257adef4406SAndrea Arcangeli pte_unmap(&orig_src_pte);
1258adef4406SAndrea Arcangeli pte_unmap(&orig_dst_pte);
1259adef4406SAndrea Arcangeli src_pte = dst_pte = NULL;
1260adef4406SAndrea Arcangeli /* now we can block and wait */
1261adef4406SAndrea Arcangeli anon_vma_lock_write(src_anon_vma);
1262adef4406SAndrea Arcangeli goto retry;
1263adef4406SAndrea Arcangeli }
1264adef4406SAndrea Arcangeli }
1265adef4406SAndrea Arcangeli
1266adef4406SAndrea Arcangeli err = move_present_pte(mm, dst_vma, src_vma,
1267adef4406SAndrea Arcangeli dst_addr, src_addr, dst_pte, src_pte,
1268adef4406SAndrea Arcangeli orig_dst_pte, orig_src_pte,
1269adef4406SAndrea Arcangeli dst_ptl, src_ptl, src_folio);
1270adef4406SAndrea Arcangeli } else {
1271adef4406SAndrea Arcangeli entry = pte_to_swp_entry(orig_src_pte);
1272adef4406SAndrea Arcangeli if (non_swap_entry(entry)) {
1273adef4406SAndrea Arcangeli if (is_migration_entry(entry)) {
1274adef4406SAndrea Arcangeli pte_unmap(&orig_src_pte);
1275adef4406SAndrea Arcangeli pte_unmap(&orig_dst_pte);
1276adef4406SAndrea Arcangeli src_pte = dst_pte = NULL;
1277adef4406SAndrea Arcangeli migration_entry_wait(mm, src_pmd, src_addr);
1278adef4406SAndrea Arcangeli err = -EAGAIN;
1279adef4406SAndrea Arcangeli } else
1280adef4406SAndrea Arcangeli err = -EFAULT;
1281adef4406SAndrea Arcangeli goto out;
1282adef4406SAndrea Arcangeli }
1283adef4406SAndrea Arcangeli
1284adef4406SAndrea Arcangeli err = move_swap_pte(mm, dst_addr, src_addr,
1285adef4406SAndrea Arcangeli dst_pte, src_pte,
1286adef4406SAndrea Arcangeli orig_dst_pte, orig_src_pte,
1287adef4406SAndrea Arcangeli dst_ptl, src_ptl);
1288adef4406SAndrea Arcangeli }
1289adef4406SAndrea Arcangeli
1290adef4406SAndrea Arcangeli out:
1291adef4406SAndrea Arcangeli if (src_anon_vma) {
1292adef4406SAndrea Arcangeli anon_vma_unlock_write(src_anon_vma);
1293adef4406SAndrea Arcangeli put_anon_vma(src_anon_vma);
1294adef4406SAndrea Arcangeli }
1295adef4406SAndrea Arcangeli if (src_folio) {
1296adef4406SAndrea Arcangeli folio_unlock(src_folio);
1297adef4406SAndrea Arcangeli folio_put(src_folio);
1298adef4406SAndrea Arcangeli }
1299adef4406SAndrea Arcangeli if (dst_pte)
1300adef4406SAndrea Arcangeli pte_unmap(dst_pte);
1301adef4406SAndrea Arcangeli if (src_pte)
1302adef4406SAndrea Arcangeli pte_unmap(src_pte);
1303adef4406SAndrea Arcangeli mmu_notifier_invalidate_range_end(&range);
1304adef4406SAndrea Arcangeli
1305adef4406SAndrea Arcangeli return err;
1306adef4406SAndrea Arcangeli }
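
/*
 * Rough shape of the protocol implemented by move_pages_pte(), for
 * orientation only (retry and error paths simplified):
 *
 *	retry:
 *		map dst_pte/src_pte without taking the PTLs
 *		snapshot orig_dst_pte and orig_src_pte under their PTLs
 *		dst not pte_none                 -> -EEXIST
 *		src pte_none                     -> -ENOENT (0 with ALLOW_SRC_HOLES)
 *		src present:
 *			zero page                -> move_zeropage_pte()
 *			else pin + trylock the folio, split it if large,
 *			trylock its anon_vma; on contention unmap the PTEs,
 *			take the lock sleeping and goto retry
 *			then                     -> move_present_pte()
 *		src is a swap entry:
 *			migration entry          -> wait for it, -EAGAIN
 *			other non-swap entry     -> -EFAULT
 *			real swap entry          -> move_swap_pte()
 */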
1307adef4406SAndrea Arcangeli
1308adef4406SAndrea Arcangeli #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1309adef4406SAndrea Arcangeli static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1310adef4406SAndrea Arcangeli unsigned long src_addr,
1311adef4406SAndrea Arcangeli unsigned long src_end)
1312adef4406SAndrea Arcangeli {
1313adef4406SAndrea Arcangeli return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
1314adef4406SAndrea Arcangeli src_end - src_addr < HPAGE_PMD_SIZE;
1315adef4406SAndrea Arcangeli }
1316adef4406SAndrea Arcangeli #else
1317adef4406SAndrea Arcangeli static inline bool move_splits_huge_pmd(unsigned long dst_addr,
1318adef4406SAndrea Arcangeli unsigned long src_addr,
1319adef4406SAndrea Arcangeli unsigned long src_end)
1320adef4406SAndrea Arcangeli {
1321adef4406SAndrea Arcangeli /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
1322adef4406SAndrea Arcangeli return false;
1323adef4406SAndrea Arcangeli }
1324adef4406SAndrea Arcangeli #endif
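
/*
 * Worked example, assuming the common 2MiB HPAGE_PMD_SIZE: moving 2MiB from
 * src 0x40000000 to dst 0x80200000 does not force a split, as both addresses
 * are PMD-aligned and a full PMD-sized chunk remains in the source range.
 * Moving to dst 0x80201000 instead, or moving with src_end - src_addr less
 * than 2MiB, makes move_splits_huge_pmd() return true, and move_pages()
 * below splits the huge pmd and falls back to moving PTE by PTE.
 */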
1325adef4406SAndrea Arcangeli
1326adef4406SAndrea Arcangeli static inline bool vma_move_compatible(struct vm_area_struct *vma)
1327adef4406SAndrea Arcangeli {
1328adef4406SAndrea Arcangeli return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
1329adef4406SAndrea Arcangeli VM_MIXEDMAP | VM_SHADOW_STACK));
1330adef4406SAndrea Arcangeli }
1331adef4406SAndrea Arcangeli
1332adef4406SAndrea Arcangeli static int validate_move_areas(struct userfaultfd_ctx *ctx,
1333adef4406SAndrea Arcangeli struct vm_area_struct *src_vma,
1334adef4406SAndrea Arcangeli struct vm_area_struct *dst_vma)
1335adef4406SAndrea Arcangeli {
1336adef4406SAndrea Arcangeli /* Only allow moving if both have the same access and protection */
1337adef4406SAndrea Arcangeli if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
1338adef4406SAndrea Arcangeli pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
1339adef4406SAndrea Arcangeli return -EINVAL;
1340adef4406SAndrea Arcangeli
1341adef4406SAndrea Arcangeli /* Only allow moving if both are mlocked or both aren't */
1342adef4406SAndrea Arcangeli if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
1343adef4406SAndrea Arcangeli return -EINVAL;
1344adef4406SAndrea Arcangeli
1345adef4406SAndrea Arcangeli /*
1346adef4406SAndrea Arcangeli * For now, we keep it simple and only move between writable VMAs.
1347adef4406SAndrea Arcangeli * Access flags are equal, therefore checking only the source is enough.
1348adef4406SAndrea Arcangeli */
1349adef4406SAndrea Arcangeli if (!(src_vma->vm_flags & VM_WRITE))
1350adef4406SAndrea Arcangeli return -EINVAL;
1351adef4406SAndrea Arcangeli
1352adef4406SAndrea Arcangeli /* Check if vma flags indicate content which can be moved */
1353adef4406SAndrea Arcangeli if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
1354adef4406SAndrea Arcangeli return -EINVAL;
1355adef4406SAndrea Arcangeli
1356adef4406SAndrea Arcangeli /* Ensure dst_vma is registered in uffd we are operating on */
1357adef4406SAndrea Arcangeli if (!dst_vma->vm_userfaultfd_ctx.ctx ||
1358adef4406SAndrea Arcangeli dst_vma->vm_userfaultfd_ctx.ctx != ctx)
1359adef4406SAndrea Arcangeli return -EINVAL;
1360adef4406SAndrea Arcangeli
1361adef4406SAndrea Arcangeli /* Only allow moving across anonymous vmas */
1362adef4406SAndrea Arcangeli if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
1363adef4406SAndrea Arcangeli return -EINVAL;
1364adef4406SAndrea Arcangeli
1365adef4406SAndrea Arcangeli return 0;
1366adef4406SAndrea Arcangeli }
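
/*
 * For illustration, moves rejected by the checks above include: a source
 * mapped PROT_READ|PROT_WRITE with a destination mapped PROT_READ only
 * (access flags differ), an mlock()ed source with an unlocked destination,
 * a destination registered with a different userfaultfd context than the
 * one the ioctl was issued on, and private file-backed mappings on either
 * side (not vma_is_anonymous()). All of these fail with -EINVAL.
 */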
1367adef4406SAndrea Arcangeli
1368867a43a3SLokesh Gidra static __always_inline
1369867a43a3SLokesh Gidra int find_vmas_mm_locked(struct mm_struct *mm,
1370867a43a3SLokesh Gidra unsigned long dst_start,
1371867a43a3SLokesh Gidra unsigned long src_start,
1372867a43a3SLokesh Gidra struct vm_area_struct **dst_vmap,
1373867a43a3SLokesh Gidra struct vm_area_struct **src_vmap)
1374867a43a3SLokesh Gidra {
1375867a43a3SLokesh Gidra struct vm_area_struct *vma;
1376867a43a3SLokesh Gidra
1377867a43a3SLokesh Gidra mmap_assert_locked(mm);
1378867a43a3SLokesh Gidra vma = find_vma_and_prepare_anon(mm, dst_start);
1379867a43a3SLokesh Gidra if (IS_ERR(vma))
1380867a43a3SLokesh Gidra return PTR_ERR(vma);
1381867a43a3SLokesh Gidra
1382867a43a3SLokesh Gidra *dst_vmap = vma;
1383867a43a3SLokesh Gidra /* Skip finding src_vma if src_start is in dst_vma */
1384867a43a3SLokesh Gidra if (src_start >= vma->vm_start && src_start < vma->vm_end)
1385867a43a3SLokesh Gidra goto out_success;
1386867a43a3SLokesh Gidra
1387867a43a3SLokesh Gidra vma = vma_lookup(mm, src_start);
1388867a43a3SLokesh Gidra if (!vma)
1389867a43a3SLokesh Gidra return -ENOENT;
1390867a43a3SLokesh Gidra out_success:
1391867a43a3SLokesh Gidra *src_vmap = vma;
1392867a43a3SLokesh Gidra return 0;
1393867a43a3SLokesh Gidra }
1394867a43a3SLokesh Gidra
1395867a43a3SLokesh Gidra #ifdef CONFIG_PER_VMA_LOCK
1396867a43a3SLokesh Gidra static int uffd_move_lock(struct mm_struct *mm,
1397867a43a3SLokesh Gidra unsigned long dst_start,
1398867a43a3SLokesh Gidra unsigned long src_start,
1399867a43a3SLokesh Gidra struct vm_area_struct **dst_vmap,
1400867a43a3SLokesh Gidra struct vm_area_struct **src_vmap)
1401867a43a3SLokesh Gidra {
1402867a43a3SLokesh Gidra struct vm_area_struct *vma;
1403867a43a3SLokesh Gidra int err;
1404867a43a3SLokesh Gidra
1405867a43a3SLokesh Gidra vma = lock_vma(mm, dst_start);
1406867a43a3SLokesh Gidra if (IS_ERR(vma))
1407867a43a3SLokesh Gidra return PTR_ERR(vma);
1408867a43a3SLokesh Gidra
1409867a43a3SLokesh Gidra *dst_vmap = vma;
1410867a43a3SLokesh Gidra /*
1411867a43a3SLokesh Gidra * Skip finding src_vma if src_start is in dst_vma. This also ensures
1412867a43a3SLokesh Gidra * that we don't lock the same vma twice.
1413867a43a3SLokesh Gidra */
1414867a43a3SLokesh Gidra if (src_start >= vma->vm_start && src_start < vma->vm_end) {
1415867a43a3SLokesh Gidra *src_vmap = vma;
1416867a43a3SLokesh Gidra return 0;
1417867a43a3SLokesh Gidra }
1418867a43a3SLokesh Gidra
1419867a43a3SLokesh Gidra /*
1420867a43a3SLokesh Gidra * Using lock_vma() to get src_vma can lead to following deadlock:
1421867a43a3SLokesh Gidra *
1422867a43a3SLokesh Gidra * Thread1 Thread2
1423867a43a3SLokesh Gidra * ------- -------
1424867a43a3SLokesh Gidra * vma_start_read(dst_vma)
1425867a43a3SLokesh Gidra * mmap_write_lock(mm)
1426867a43a3SLokesh Gidra * vma_start_write(src_vma)
1427867a43a3SLokesh Gidra * vma_start_read(src_vma)
1428867a43a3SLokesh Gidra * mmap_read_lock(mm)
1429867a43a3SLokesh Gidra * vma_start_write(dst_vma)
1430867a43a3SLokesh Gidra */
1431867a43a3SLokesh Gidra *src_vmap = lock_vma_under_rcu(mm, src_start);
1432867a43a3SLokesh Gidra if (likely(*src_vmap))
1433867a43a3SLokesh Gidra return 0;
1434867a43a3SLokesh Gidra
1435867a43a3SLokesh Gidra /* Undo any locking and retry in mmap_lock critical section */
1436867a43a3SLokesh Gidra vma_end_read(*dst_vmap);
1437867a43a3SLokesh Gidra
1438867a43a3SLokesh Gidra mmap_read_lock(mm);
1439867a43a3SLokesh Gidra err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
1440867a43a3SLokesh Gidra if (!err) {
1441867a43a3SLokesh Gidra /*
1442867a43a3SLokesh Gidra * See comment in lock_vma() as to why not using
1443867a43a3SLokesh Gidra * vma_start_read() here.
1444867a43a3SLokesh Gidra */
1445867a43a3SLokesh Gidra down_read(&(*dst_vmap)->vm_lock->lock);
1446867a43a3SLokesh Gidra if (*dst_vmap != *src_vmap)
1447*30af24faSLokesh Gidra down_read_nested(&(*src_vmap)->vm_lock->lock,
1448*30af24faSLokesh Gidra SINGLE_DEPTH_NESTING);
1449867a43a3SLokesh Gidra }
1450867a43a3SLokesh Gidra mmap_read_unlock(mm);
1451867a43a3SLokesh Gidra return err;
1452867a43a3SLokesh Gidra }
1453867a43a3SLokesh Gidra
1454867a43a3SLokesh Gidra static void uffd_move_unlock(struct vm_area_struct *dst_vma,
1455867a43a3SLokesh Gidra struct vm_area_struct *src_vma)
1456867a43a3SLokesh Gidra {
1457867a43a3SLokesh Gidra vma_end_read(src_vma);
1458867a43a3SLokesh Gidra if (src_vma != dst_vma)
1459867a43a3SLokesh Gidra vma_end_read(dst_vma);
1460867a43a3SLokesh Gidra }
1461867a43a3SLokesh Gidra
1462867a43a3SLokesh Gidra #else
1463867a43a3SLokesh Gidra
1464867a43a3SLokesh Gidra static int uffd_move_lock(struct mm_struct *mm,
1465867a43a3SLokesh Gidra unsigned long dst_start,
1466867a43a3SLokesh Gidra unsigned long src_start,
1467867a43a3SLokesh Gidra struct vm_area_struct **dst_vmap,
1468867a43a3SLokesh Gidra struct vm_area_struct **src_vmap)
1469867a43a3SLokesh Gidra {
1470867a43a3SLokesh Gidra int err;
1471867a43a3SLokesh Gidra
1472867a43a3SLokesh Gidra mmap_read_lock(mm);
1473867a43a3SLokesh Gidra err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
1474867a43a3SLokesh Gidra if (err)
1475867a43a3SLokesh Gidra mmap_read_unlock(mm);
1476867a43a3SLokesh Gidra return err;
1477867a43a3SLokesh Gidra }
1478867a43a3SLokesh Gidra
1479867a43a3SLokesh Gidra static void uffd_move_unlock(struct vm_area_struct *dst_vma,
1480867a43a3SLokesh Gidra struct vm_area_struct *src_vma)
1481867a43a3SLokesh Gidra {
1482867a43a3SLokesh Gidra mmap_assert_locked(src_vma->vm_mm);
1483867a43a3SLokesh Gidra mmap_read_unlock(dst_vma->vm_mm);
1484867a43a3SLokesh Gidra }
1485867a43a3SLokesh Gidra #endif
1486867a43a3SLokesh Gidra
1487adef4406SAndrea Arcangeli /**
1488adef4406SAndrea Arcangeli * move_pages - move arbitrary anonymous pages of an existing vma
1489adef4406SAndrea Arcangeli * @ctx: pointer to the userfaultfd context
1490adef4406SAndrea Arcangeli * @dst_start: start of the destination virtual memory range
1491adef4406SAndrea Arcangeli * @src_start: start of the source virtual memory range
1492adef4406SAndrea Arcangeli * @len: length of the virtual memory range
1493adef4406SAndrea Arcangeli * @mode: flags from uffdio_move.mode
1494adef4406SAndrea Arcangeli *
1495867a43a3SLokesh Gidra * It will either use the mmap_lock in read mode or per-vma locks.
1496adef4406SAndrea Arcangeli *
1497adef4406SAndrea Arcangeli * move_pages() remaps arbitrary anonymous pages atomically in zero
1498adef4406SAndrea Arcangeli * copy. It only works on non shared anonymous pages because those can
1499adef4406SAndrea Arcangeli * be relocated without generating non linear anon_vmas in the rmap
1500adef4406SAndrea Arcangeli * code.
1501adef4406SAndrea Arcangeli *
1502adef4406SAndrea Arcangeli * It provides a zero copy mechanism to handle userspace page faults.
1503adef4406SAndrea Arcangeli * The source vma pages should have mapcount == 1, which can be
1504adef4406SAndrea Arcangeli * enforced by using madvise(MADV_DONTFORK) on src vma.
1505adef4406SAndrea Arcangeli *
1506adef4406SAndrea Arcangeli * The thread receiving the page during the userland page fault
1507adef4406SAndrea Arcangeli * will receive the faulting page in the source vma through the network,
1508adef4406SAndrea Arcangeli * storage or any other I/O device (MADV_DONTFORK in the source vma
1509adef4406SAndrea Arcangeli * prevents move_pages() from failing with -EBUSY if the process forks before
1510adef4406SAndrea Arcangeli * move_pages() is called), then it will call move_pages() to map the
1511adef4406SAndrea Arcangeli * page in the faulting address in the destination vma.
1512adef4406SAndrea Arcangeli *
1513adef4406SAndrea Arcangeli * This userfaultfd command works purely via pagetables, so it's the
1514adef4406SAndrea Arcangeli * most efficient way to move physical non shared anonymous pages
1515adef4406SAndrea Arcangeli * across different virtual addresses. Unlike mremap()/mmap()/munmap()
1516adef4406SAndrea Arcangeli * it does not create any new vmas. The mapping in the destination
1517adef4406SAndrea Arcangeli * address is atomic.
1518adef4406SAndrea Arcangeli *
1519adef4406SAndrea Arcangeli * It only works if the vma protection bits are identical from the
1520adef4406SAndrea Arcangeli * source and destination vma.
1521adef4406SAndrea Arcangeli *
1522adef4406SAndrea Arcangeli * It can remap non shared anonymous pages within the same vma too.
1523adef4406SAndrea Arcangeli *
1524adef4406SAndrea Arcangeli * If the source virtual memory range has any unmapped holes, or if
1525adef4406SAndrea Arcangeli * the destination virtual memory range is not a whole unmapped hole,
1526adef4406SAndrea Arcangeli * move_pages() will fail respectively with -ENOENT or -EEXIST. This
1527adef4406SAndrea Arcangeli * provides a very strict behavior to avoid any chance of memory
1528adef4406SAndrea Arcangeli * corruption going unnoticed if there are userland race conditions.
1529adef4406SAndrea Arcangeli * Only one thread should resolve the userland page fault at any given
1530adef4406SAndrea Arcangeli * time for any given faulting address. This means that if two threads
1531adef4406SAndrea Arcangeli * try to both call move_pages() on the same destination address at the
1532adef4406SAndrea Arcangeli * same time, the second thread will get an explicit error from this
1533adef4406SAndrea Arcangeli * command.
1534adef4406SAndrea Arcangeli *
1535adef4406SAndrea Arcangeli * The command retval will return "len" if successful. The command
1536adef4406SAndrea Arcangeli * however can be interrupted by fatal signals or errors. If
1537adef4406SAndrea Arcangeli * interrupted it will return the number of bytes successfully
1538adef4406SAndrea Arcangeli * remapped before the interruption if any, or the negative error if
1539adef4406SAndrea Arcangeli * none. It will never return zero. Either it will return an error or
1540adef4406SAndrea Arcangeli * an amount of bytes successfully moved. If the retval reports a
1541adef4406SAndrea Arcangeli * "short" remap, the move_pages() command should be repeated by
1542adef4406SAndrea Arcangeli * userland with src+retval, dst+retval, len-retval if it wants to know
1543adef4406SAndrea Arcangeli * about the error that interrupted it (see the usage sketch after this function).
1544adef4406SAndrea Arcangeli *
1545adef4406SAndrea Arcangeli * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
1546adef4406SAndrea Arcangeli * prevent -ENOENT errors from materializing if there are holes in the
1547adef4406SAndrea Arcangeli * source virtual range that is being remapped. The holes will be
1548adef4406SAndrea Arcangeli * accounted as successfully remapped in the retval of the
1549adef4406SAndrea Arcangeli * command. This is mostly useful to remap hugepage naturally aligned
1550adef4406SAndrea Arcangeli * virtual regions without knowing if there are transparent hugepages
1551adef4406SAndrea Arcangeli * in the regions or not, while avoiding the risk of having to split
1552adef4406SAndrea Arcangeli * the huge pmd during the remap.
1553adef4406SAndrea Arcangeli *
1554adef4406SAndrea Arcangeli * If there's any rmap walk that is taking the anon_vma locks without
1555adef4406SAndrea Arcangeli * first obtaining the folio lock (the only current instance is
1556adef4406SAndrea Arcangeli * folio_referenced), they will have to verify if the folio->mapping
1557adef4406SAndrea Arcangeli * has changed after taking the anon_vma lock. If it changed they
1558adef4406SAndrea Arcangeli * should release the lock and retry obtaining a new anon_vma, because
1559adef4406SAndrea Arcangeli * it means the anon_vma was changed by move_pages() before the lock
1560adef4406SAndrea Arcangeli * could be obtained. This is the only additional complexity added to
1561adef4406SAndrea Arcangeli * the rmap code to provide this anonymous page remapping functionality.
1562adef4406SAndrea Arcangeli */
1563867a43a3SLokesh Gidra ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
1564867a43a3SLokesh Gidra unsigned long src_start, unsigned long len, __u64 mode)
1565adef4406SAndrea Arcangeli {
1566867a43a3SLokesh Gidra struct mm_struct *mm = ctx->mm;
1567adef4406SAndrea Arcangeli struct vm_area_struct *src_vma, *dst_vma;
1568adef4406SAndrea Arcangeli unsigned long src_addr, dst_addr;
1569adef4406SAndrea Arcangeli pmd_t *src_pmd, *dst_pmd;
1570adef4406SAndrea Arcangeli long err = -EINVAL;
1571adef4406SAndrea Arcangeli ssize_t moved = 0;
1572adef4406SAndrea Arcangeli
1573adef4406SAndrea Arcangeli /* Sanitize the command parameters. */
1574adef4406SAndrea Arcangeli if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
1575adef4406SAndrea Arcangeli WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
1576adef4406SAndrea Arcangeli WARN_ON_ONCE(len & ~PAGE_MASK))
1577adef4406SAndrea Arcangeli goto out;
1578adef4406SAndrea Arcangeli
1579adef4406SAndrea Arcangeli /* Does the address range wrap, or is the span zero-sized? */
1580adef4406SAndrea Arcangeli if (WARN_ON_ONCE(src_start + len <= src_start) ||
1581adef4406SAndrea Arcangeli WARN_ON_ONCE(dst_start + len <= dst_start))
1582adef4406SAndrea Arcangeli goto out;
1583adef4406SAndrea Arcangeli
1584867a43a3SLokesh Gidra err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
1585867a43a3SLokesh Gidra if (err)
1586867a43a3SLokesh Gidra goto out;
1587867a43a3SLokesh Gidra
1588867a43a3SLokesh Gidra /* Re-check after taking map_changing_lock */
1589867a43a3SLokesh Gidra err = -EAGAIN;
1590867a43a3SLokesh Gidra down_read(&ctx->map_changing_lock);
1591867a43a3SLokesh Gidra if (unlikely(atomic_read(&ctx->mmap_changing)))
1592867a43a3SLokesh Gidra goto out_unlock;
1593adef4406SAndrea Arcangeli /*
1594adef4406SAndrea Arcangeli * Make sure the vma is not shared, that the src and dst remap
1595adef4406SAndrea Arcangeli * ranges are both valid and fully within a single existing
1596adef4406SAndrea Arcangeli * vma.
1597adef4406SAndrea Arcangeli */
1598867a43a3SLokesh Gidra err = -EINVAL;
1599867a43a3SLokesh Gidra if (src_vma->vm_flags & VM_SHARED)
1600867a43a3SLokesh Gidra goto out_unlock;
1601867a43a3SLokesh Gidra if (src_start + len > src_vma->vm_end)
1602867a43a3SLokesh Gidra goto out_unlock;
1603adef4406SAndrea Arcangeli
1604867a43a3SLokesh Gidra if (dst_vma->vm_flags & VM_SHARED)
1605867a43a3SLokesh Gidra goto out_unlock;
1606867a43a3SLokesh Gidra if (dst_start + len > dst_vma->vm_end)
1607867a43a3SLokesh Gidra goto out_unlock;
1608adef4406SAndrea Arcangeli
1609adef4406SAndrea Arcangeli err = validate_move_areas(ctx, src_vma, dst_vma);
1610adef4406SAndrea Arcangeli if (err)
1611867a43a3SLokesh Gidra goto out_unlock;
1612adef4406SAndrea Arcangeli
1613adef4406SAndrea Arcangeli for (src_addr = src_start, dst_addr = dst_start;
1614adef4406SAndrea Arcangeli src_addr < src_start + len;) {
1615adef4406SAndrea Arcangeli spinlock_t *ptl;
1616adef4406SAndrea Arcangeli pmd_t dst_pmdval;
1617adef4406SAndrea Arcangeli unsigned long step_size;
1618adef4406SAndrea Arcangeli
1619adef4406SAndrea Arcangeli /*
1620adef4406SAndrea Arcangeli * Below works because an anonymous area would not have a
1621adef4406SAndrea Arcangeli * transparent huge PUD. If file-backed support is added,
1622adef4406SAndrea Arcangeli * that case would need to be handled here.
1623adef4406SAndrea Arcangeli */
1624adef4406SAndrea Arcangeli src_pmd = mm_find_pmd(mm, src_addr);
1625adef4406SAndrea Arcangeli if (unlikely(!src_pmd)) {
1626adef4406SAndrea Arcangeli if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1627adef4406SAndrea Arcangeli err = -ENOENT;
1628adef4406SAndrea Arcangeli break;
1629adef4406SAndrea Arcangeli }
1630adef4406SAndrea Arcangeli src_pmd = mm_alloc_pmd(mm, src_addr);
1631adef4406SAndrea Arcangeli if (unlikely(!src_pmd)) {
1632adef4406SAndrea Arcangeli err = -ENOMEM;
1633adef4406SAndrea Arcangeli break;
1634adef4406SAndrea Arcangeli }
1635adef4406SAndrea Arcangeli }
1636adef4406SAndrea Arcangeli dst_pmd = mm_alloc_pmd(mm, dst_addr);
1637adef4406SAndrea Arcangeli if (unlikely(!dst_pmd)) {
1638adef4406SAndrea Arcangeli err = -ENOMEM;
1639adef4406SAndrea Arcangeli break;
1640adef4406SAndrea Arcangeli }
1641adef4406SAndrea Arcangeli
1642adef4406SAndrea Arcangeli dst_pmdval = pmdp_get_lockless(dst_pmd);
1643adef4406SAndrea Arcangeli /*
1644adef4406SAndrea Arcangeli * If the dst_pmd is mapped as THP, don't override it and just
1645adef4406SAndrea Arcangeli * be strict. If dst_pmd changes into THP after this check,
1646adef4406SAndrea Arcangeli * move_pages_huge_pmd() will detect the change and retry,
1647adef4406SAndrea Arcangeli * while move_pages_pte() will detect the change and fail.
1648adef4406SAndrea Arcangeli */
1649adef4406SAndrea Arcangeli if (unlikely(pmd_trans_huge(dst_pmdval))) {
1650adef4406SAndrea Arcangeli err = -EEXIST;
1651adef4406SAndrea Arcangeli break;
1652adef4406SAndrea Arcangeli }
1653adef4406SAndrea Arcangeli
1654adef4406SAndrea Arcangeli ptl = pmd_trans_huge_lock(src_pmd, src_vma);
1655adef4406SAndrea Arcangeli if (ptl) {
1656adef4406SAndrea Arcangeli if (pmd_devmap(*src_pmd)) {
1657adef4406SAndrea Arcangeli spin_unlock(ptl);
1658adef4406SAndrea Arcangeli err = -ENOENT;
1659adef4406SAndrea Arcangeli break;
1660adef4406SAndrea Arcangeli }
1661adef4406SAndrea Arcangeli
1662adef4406SAndrea Arcangeli /* Check if we can move the pmd without splitting it. */
1663adef4406SAndrea Arcangeli if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
1664adef4406SAndrea Arcangeli !pmd_none(dst_pmdval)) {
1665adef4406SAndrea Arcangeli struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
1666adef4406SAndrea Arcangeli
1667eb1521daSSuren Baghdasaryan if (!folio || (!is_huge_zero_page(&folio->page) &&
1668eb1521daSSuren Baghdasaryan !PageAnonExclusive(&folio->page))) {
1669adef4406SAndrea Arcangeli spin_unlock(ptl);
1670adef4406SAndrea Arcangeli err = -EBUSY;
1671adef4406SAndrea Arcangeli break;
1672adef4406SAndrea Arcangeli }
1673adef4406SAndrea Arcangeli
1674adef4406SAndrea Arcangeli spin_unlock(ptl);
1675adef4406SAndrea Arcangeli split_huge_pmd(src_vma, src_pmd, src_addr);
1676adef4406SAndrea Arcangeli /* The folio will be split by move_pages_pte() */
1677adef4406SAndrea Arcangeli continue;
1678adef4406SAndrea Arcangeli }
1679adef4406SAndrea Arcangeli
1680adef4406SAndrea Arcangeli err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
1681adef4406SAndrea Arcangeli dst_pmdval, dst_vma, src_vma,
1682adef4406SAndrea Arcangeli dst_addr, src_addr);
1683adef4406SAndrea Arcangeli step_size = HPAGE_PMD_SIZE;
1684adef4406SAndrea Arcangeli } else {
1685adef4406SAndrea Arcangeli if (pmd_none(*src_pmd)) {
1686adef4406SAndrea Arcangeli if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
1687adef4406SAndrea Arcangeli err = -ENOENT;
1688adef4406SAndrea Arcangeli break;
1689adef4406SAndrea Arcangeli }
1690adef4406SAndrea Arcangeli if (unlikely(__pte_alloc(mm, src_pmd))) {
1691adef4406SAndrea Arcangeli err = -ENOMEM;
1692adef4406SAndrea Arcangeli break;
1693adef4406SAndrea Arcangeli }
1694adef4406SAndrea Arcangeli }
1695adef4406SAndrea Arcangeli
1696adef4406SAndrea Arcangeli if (unlikely(pte_alloc(mm, dst_pmd))) {
1697adef4406SAndrea Arcangeli err = -ENOMEM;
1698adef4406SAndrea Arcangeli break;
1699adef4406SAndrea Arcangeli }
1700adef4406SAndrea Arcangeli
1701adef4406SAndrea Arcangeli err = move_pages_pte(mm, dst_pmd, src_pmd,
1702adef4406SAndrea Arcangeli dst_vma, src_vma,
1703adef4406SAndrea Arcangeli dst_addr, src_addr, mode);
1704adef4406SAndrea Arcangeli step_size = PAGE_SIZE;
1705adef4406SAndrea Arcangeli }
1706adef4406SAndrea Arcangeli
1707adef4406SAndrea Arcangeli cond_resched();
1708adef4406SAndrea Arcangeli
1709adef4406SAndrea Arcangeli if (fatal_signal_pending(current)) {
1710adef4406SAndrea Arcangeli /* Do not override an error */
1711adef4406SAndrea Arcangeli if (!err || err == -EAGAIN)
1712adef4406SAndrea Arcangeli err = -EINTR;
1713adef4406SAndrea Arcangeli break;
1714adef4406SAndrea Arcangeli }
1715adef4406SAndrea Arcangeli
1716adef4406SAndrea Arcangeli if (err) {
1717adef4406SAndrea Arcangeli if (err == -EAGAIN)
1718adef4406SAndrea Arcangeli continue;
1719adef4406SAndrea Arcangeli break;
1720adef4406SAndrea Arcangeli }
1721adef4406SAndrea Arcangeli
1722adef4406SAndrea Arcangeli /* Proceed to the next page */
1723adef4406SAndrea Arcangeli dst_addr += step_size;
1724adef4406SAndrea Arcangeli src_addr += step_size;
1725adef4406SAndrea Arcangeli moved += step_size;
1726adef4406SAndrea Arcangeli }
1727adef4406SAndrea Arcangeli
1728867a43a3SLokesh Gidra out_unlock:
1729867a43a3SLokesh Gidra up_read(&ctx->map_changing_lock);
1730867a43a3SLokesh Gidra uffd_move_unlock(dst_vma, src_vma);
1731adef4406SAndrea Arcangeli out:
1732adef4406SAndrea Arcangeli VM_WARN_ON(moved < 0);
1733adef4406SAndrea Arcangeli VM_WARN_ON(err > 0);
1734adef4406SAndrea Arcangeli VM_WARN_ON(!moved && !err);
1735adef4406SAndrea Arcangeli return moved ? moved : err;
1736adef4406SAndrea Arcangeli }
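
/*
 * Illustrative userspace usage, a minimal sketch only: "uffd", "staging",
 * "fault_addr" and "page_size" are assumed to be provided by the caller's
 * fault-handling setup, and error handling is reduced to the retry rule
 * documented above (repeat with src/dst/len advanced by the bytes already
 * moved).
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	struct uffdio_move move = {
 *		.dst  = fault_addr,
 *		.src  = (unsigned long)staging,
 *		.len  = page_size,
 *		.mode = 0,
 *	};
 *
 *	while (ioctl(uffd, UFFDIO_MOVE, &move) == -1) {
 *		if (errno != EAGAIN)
 *			break;			// hard error, give up
 *		if (move.move > 0) {
 *			// short move: retry only the remainder
 *			move.dst += move.move;
 *			move.src += move.move;
 *			move.len -= move.move;
 *		}
 *		// else transient -EAGAIN: retry unchanged
 *	}
 */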
1737