// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

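/*
 * Install a newly allocated anonymous page at dst_addr, filled with
 * PAGE_SIZE bytes copied from the userspace address src_addr.  If the
 * atomic copy from userspace fails, return -ENOENT with the allocated
 * page in *pagep, so the caller can redo the copy outside mmap_sem and
 * pass the filled page back in through *pagep.
 */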
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fall back to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}

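/*
 * Map the zero page at dst_addr as a read-only special pte.  No page is
 * allocated; fails with -EEXIST if a pte is already present and with
 * -EFAULT if the shmem i_size check does not pass.
 */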
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

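/*
 * Walk and, if necessary, allocate the p4d, pud and pmd levels covering
 * @address.  Returns the pmd pointer, or NULL if an allocation failed.
 */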
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we don't necessarily get here because the pmd was
	 * missing: *pmd may already be established, and in turn it may
	 * also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held; it releases mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes
	 * supported by hugetlb.  A PMD_SIZE zero huge page may exist, but
	 * only as used by THP.  Since we cannot reliably insert a zero
	 * page, this feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
	 * retry, dst_vma will have been set to NULL and we must look it up
	 * again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		vm_alloc_shared = vm_shared;

		cond_resched();

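		/*
		 * -ENOENT means the source copy faulted while mmap_sem was
		 * held: drop the lock, copy the huge page in a context that
		 * may sleep, then retry with a fresh vma lookup.
		 */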
		if (unlikely(err == -ENOENT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * cannot call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page.  In this case, set PagePrivate so that
		 * the global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */

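/*
 * Route a single-pte fill to the anonymous or shmem implementation,
 * depending on whether the destination vma is private or shared.
 */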
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage)
{
	ssize_t err;

	/*
	 * The normal page fault path for shmem will invoke the fault
	 * handler, fill the hole in the file and COW it right away.  The
	 * result is plain anonymous memory.  So when we are asked to fill
	 * a hole in a MAP_PRIVATE shmem mapping, we'll generate anonymous
	 * memory directly without actually filling the hole.  For the
	 * MAP_PRIVATE case the robustness check only happens in the
	 * pagetable (to verify it's still none) and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}

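/*
 * Common implementation of UFFDIO_COPY and UFFDIO_ZEROPAGE: fill the
 * range [dst_start, dst_start + len) one pte at a time, retrying with
 * mmap_sem dropped whenever the atomic copy from userspace faults.
 * Returns the number of bytes filled, or an error if nothing was copied.
 */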
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage,
					      bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing, uffd-registered vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED
	 * and it overwrites vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * In the non-shared case, ensure the dst_vma has an anon_vma
	 * or this page would get a NULL anon_vma when inserted into
	 * the dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as a THP, don't override it;
		 * just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage);
		cond_resched();

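		/*
		 * -ENOENT means the atomic copy from userspace faulted.
		 * Redo the copy with a sleeping kmap/copy_from_user after
		 * dropping mmap_sem, then retry the whole lookup.
		 */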
		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

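/*
 * mcopy_atomic() backs the UFFDIO_COPY ioctl: atomically copy len bytes
 * from src_start into the userfaultfd-registered range at dst_start.
 */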
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
			      mmap_changing);
}

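/*
 * mfill_zeropage() backs the UFFDIO_ZEROPAGE ioctl: map the zero page
 * over len bytes starting at start.
 */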
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
}