xref: /linux/mm/gup.c (revision dcb8cbb58a218c99aab0dbf3f76cf06a04d44f37)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/err.h>
5 #include <linux/spinlock.h>
6 
7 #include <linux/mm.h>
8 #include <linux/memremap.h>
9 #include <linux/pagemap.h>
10 #include <linux/rmap.h>
11 #include <linux/swap.h>
12 #include <linux/swapops.h>
13 #include <linux/secretmem.h>
14 
15 #include <linux/sched/signal.h>
16 #include <linux/rwsem.h>
17 #include <linux/hugetlb.h>
18 #include <linux/migrate.h>
19 #include <linux/mm_inline.h>
20 #include <linux/sched/mm.h>
21 
22 #include <asm/mmu_context.h>
23 #include <asm/tlbflush.h>
24 
25 #include "internal.h"
26 
27 struct follow_page_context {
28 	struct dev_pagemap *pgmap;
29 	unsigned int page_mask;
30 };
31 
32 static inline void sanity_check_pinned_pages(struct page **pages,
33 					     unsigned long npages)
34 {
35 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
36 		return;
37 
38 	/*
39 	 * We only pin anonymous pages if they are exclusive. Once pinned, we
40 	 * can no longer turn them possibly shared and PageAnonExclusive() will
41 	 * stick around until the page is freed.
42 	 *
43 	 * We'd like to verify that our pinned anonymous pages are still mapped
44 	 * exclusively. The issue with anon THP is that we don't know how
45 	 * they are/were mapped when pinning them. However, for anon
46 	 * THP we can assume that either the given page (PTE-mapped THP) or
47 	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
48 	 * neither is the case, there is certainly something wrong.
49 	 */
50 	for (; npages; npages--, pages++) {
51 		struct page *page = *pages;
52 		struct folio *folio = page_folio(page);
53 
54 		if (!folio_test_anon(folio))
55 			continue;
56 		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
57 			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
58 		else
59 			/* Either a PTE-mapped or a PMD-mapped THP. */
60 			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
61 				       !PageAnonExclusive(page), page);
62 	}
63 }
64 
65 /*
66  * Return the folio with ref appropriately incremented,
67  * or NULL if that failed.
68  */
69 static inline struct folio *try_get_folio(struct page *page, int refs)
70 {
71 	struct folio *folio;
72 
73 retry:
74 	folio = page_folio(page);
75 	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
76 		return NULL;
77 	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
78 		return NULL;
79 
80 	/*
81 	 * At this point we have a stable reference to the folio; but it
82 	 * could be that between calling page_folio() and the refcount
83 	 * increment, the folio was split, in which case we'd end up
84 	 * holding a reference on a folio that has nothing to do with the page
85 	 * we were given anymore.
86 	 * So now that the folio is stable, recheck that the page still
87 	 * belongs to this folio.
88 	 */
89 	if (unlikely(page_folio(page) != folio)) {
90 		if (!put_devmap_managed_page_refs(&folio->page, refs))
91 			folio_put_refs(folio, refs);
92 		goto retry;
93 	}
94 
95 	return folio;
96 }
97 
98 /**
99  * try_grab_folio() - Attempt to get or pin a folio.
100  * @page:  pointer to page to be grabbed
101  * @refs:  the value to (effectively) add to the folio's refcount
102  * @flags: gup flags: these are the FOLL_* flag values.
103  *
104  * "grab" names in this file mean, "look at flags to decide whether to use
105  * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
106  *
107  * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
108  * same time. (That's true throughout the get_user_pages*() and
109  * pin_user_pages*() APIs.) Cases:
110  *
111  *    FOLL_GET: folio's refcount will be incremented by @refs.
112  *
113  *    FOLL_PIN on large folios: folio's refcount will be incremented by
114  *    @refs, and its pincount will be incremented by @refs.
115  *
116  *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
117  *    @refs * GUP_PIN_COUNTING_BIAS.
118  *
119  * Return: The folio containing @page (with refcount appropriately
120  * incremented) for success, or NULL upon failure. If neither FOLL_GET
121  * nor FOLL_PIN was set, that's considered failure, and furthermore,
122  * a likely bug in the caller, so a warning is also emitted.
123  */
124 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
125 {
126 	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
127 		return NULL;
128 
129 	if (flags & FOLL_GET)
130 		return try_get_folio(page, refs);
131 	else if (flags & FOLL_PIN) {
132 		struct folio *folio;
133 
134 		/*
135 		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
136 		 * right zone, so fail and let the caller fall back to the slow
137 		 * path.
138 		 */
139 		if (unlikely((flags & FOLL_LONGTERM) &&
140 			     !is_longterm_pinnable_page(page)))
141 			return NULL;
142 
143 		/*
144 		 * CAUTION: Don't use compound_head() on the page before this
145 		 * point, the result won't be stable.
146 		 */
147 		folio = try_get_folio(page, refs);
148 		if (!folio)
149 			return NULL;
150 
151 		/*
152 		 * When pinning a large folio, use an exact count to track it.
153 		 *
154 		 * However, be sure to *also* increment the normal folio
155 		 * refcount field at least once, so that the folio really
156 		 * is pinned.  That's why the refcount from the earlier
157 		 * try_get_folio() is left intact.
158 		 */
159 		if (folio_test_large(folio))
160 			atomic_add(refs, &folio->_pincount);
161 		else
162 			folio_ref_add(folio,
163 					refs * (GUP_PIN_COUNTING_BIAS - 1));
164 		/*
165 		 * Adjust the pincount before re-checking the PTE for changes.
166 		 * This is essentially a smp_mb() and is paired with a memory
167 		 * barrier in page_try_share_anon_rmap().
168 		 */
169 		smp_mb__after_atomic();
170 
171 		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
172 
173 		return folio;
174 	}
175 
176 	WARN_ON_ONCE(1);
177 	return NULL;
178 }
179 
180 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
181 {
182 	if (flags & FOLL_PIN) {
183 		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
184 		if (folio_test_large(folio))
185 			atomic_sub(refs, &folio->_pincount);
186 		else
187 			refs *= GUP_PIN_COUNTING_BIAS;
188 	}
189 
190 	if (!put_devmap_managed_page_refs(&folio->page, refs))
191 		folio_put_refs(folio, refs);
192 }
193 
194 /**
195  * try_grab_page() - elevate a page's refcount by a flag-dependent amount
196  * @page:    pointer to page to be grabbed
197  * @flags:   gup flags: these are the FOLL_* flag values.
198  *
199  * This might not do anything at all, depending on the flags argument.
200  *
201  * "grab" names in this file mean, "look at flags to decide whether to use
202  * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
203  *
204  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
205  * time. Cases: please see the try_grab_folio() documentation, with
206  * "refs=1".
207  *
208  * Return: 0 for success, or if no action was required (if neither FOLL_PIN
209  * nor FOLL_GET was set, nothing is done). A negative error code for failure:
210  *
211  *   -ENOMEM		FOLL_GET or FOLL_PIN was set, but the page could not
212  *			be grabbed.
213  */
214 int __must_check try_grab_page(struct page *page, unsigned int flags)
215 {
216 	struct folio *folio = page_folio(page);
217 
218 	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
219 		return -ENOMEM;
220 
221 	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
222 		return -EREMOTEIO;
223 
224 	if (flags & FOLL_GET)
225 		folio_ref_inc(folio);
226 	else if (flags & FOLL_PIN) {
227 		/*
228 		 * Similar to try_grab_folio(): be sure to *also*
229 		 * increment the normal page refcount field at least once,
230 		 * so that the page really is pinned.
231 		 */
232 		if (folio_test_large(folio)) {
233 			folio_ref_add(folio, 1);
234 			atomic_add(1, &folio->_pincount);
235 		} else {
236 			folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
237 		}
238 
239 		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
240 	}
241 
242 	return 0;
243 }
244 
245 /**
246  * unpin_user_page() - release a dma-pinned page
247  * @page:            pointer to page to be released
248  *
249  * Pages that were pinned via pin_user_pages*() must be released via either
250  * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
251  * that such pages can be separately tracked and uniquely handled. In
252  * particular, interactions with RDMA and filesystems need special handling.
253  */
254 void unpin_user_page(struct page *page)
255 {
256 	sanity_check_pinned_pages(&page, 1);
257 	gup_put_folio(page_folio(page), 1, FOLL_PIN);
258 }
259 EXPORT_SYMBOL(unpin_user_page);
260 
261 static inline struct folio *gup_folio_range_next(struct page *start,
262 		unsigned long npages, unsigned long i, unsigned int *ntails)
263 {
264 	struct page *next = nth_page(start, i);
265 	struct folio *folio = page_folio(next);
266 	unsigned int nr = 1;
267 
268 	if (folio_test_large(folio))
269 		nr = min_t(unsigned int, npages - i,
270 			   folio_nr_pages(folio) - folio_page_idx(folio, next));
271 
272 	*ntails = nr;
273 	return folio;
274 }
275 
276 static inline struct folio *gup_folio_next(struct page **list,
277 		unsigned long npages, unsigned long i, unsigned int *ntails)
278 {
279 	struct folio *folio = page_folio(list[i]);
280 	unsigned int nr;
281 
282 	for (nr = i + 1; nr < npages; nr++) {
283 		if (page_folio(list[nr]) != folio)
284 			break;
285 	}
286 
287 	*ntails = nr - i;
288 	return folio;
289 }
290 
291 /**
292  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
293  * @pages:  array of pages to be maybe marked dirty, and definitely released.
294  * @npages: number of pages in the @pages array.
295  * @make_dirty: whether to mark the pages dirty
296  *
297  * "gup-pinned page" refers to a page that has had one of the get_user_pages()
298  * variants called on that page.
299  *
300  * For each page in the @pages array, make that page (or its head page, if a
301  * compound page) dirty, if @make_dirty is true, and if the page was previously
302  * listed as clean. In any case, releases all pages using unpin_user_page(),
303  * possibly via unpin_user_pages(), for the non-dirty case.
304  *
305  * Please see the unpin_user_page() documentation for details.
306  *
307  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
308  * required, then the caller should a) verify that this is really correct,
309  * because _lock() is usually required, and b) hand code it:
310  * set_page_dirty_lock(), unpin_user_page().
311  *
312  */
313 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
314 				 bool make_dirty)
315 {
316 	unsigned long i;
317 	struct folio *folio;
318 	unsigned int nr;
319 
320 	if (!make_dirty) {
321 		unpin_user_pages(pages, npages);
322 		return;
323 	}
324 
325 	sanity_check_pinned_pages(pages, npages);
326 	for (i = 0; i < npages; i += nr) {
327 		folio = gup_folio_next(pages, npages, i, &nr);
328 		/*
329 		 * Checking PageDirty at this point may race with
330 		 * clear_page_dirty_for_io(), but that's OK. Two key
331 		 * cases:
332 		 *
333 		 * 1) This code sees the page as already dirty, so it
334 		 * skips the call to set_page_dirty(). That could happen
335 		 * because clear_page_dirty_for_io() called
336 		 * page_mkclean(), followed by set_page_dirty().
337 		 * However, now the page is going to get written back,
338 		 * which meets the original intention of setting it
339 		 * dirty, so all is well: clear_page_dirty_for_io() goes
340 		 * on to call TestClearPageDirty(), and write the page
341 		 * back.
342 		 *
343 		 * 2) This code sees the page as clean, so it calls
344 		 * set_page_dirty(). The page stays dirty, despite being
345 		 * written back, so it gets written back again in the
346 		 * next writeback cycle. This is harmless.
347 		 */
348 		if (!folio_test_dirty(folio)) {
349 			folio_lock(folio);
350 			folio_mark_dirty(folio);
351 			folio_unlock(folio);
352 		}
353 		gup_put_folio(folio, nr, FOLL_PIN);
354 	}
355 }
356 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
357 
358 /**
359  * unpin_user_page_range_dirty_lock() - release and optionally dirty
360  * gup-pinned page range
361  *
362  * @page:  the starting page of a range maybe marked dirty, and definitely released.
363  * @npages: number of consecutive pages to release.
364  * @make_dirty: whether to mark the pages dirty
365  *
366  * "gup-pinned page range" refers to a range of pages that has had one of the
367  * pin_user_pages() variants called on that page.
368  *
369  * For the page ranges defined by [page .. page+npages], make that range (or
370  * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
371  * page range was previously listed as clean.
372  *
373  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
374  * required, then the caller should a) verify that this is really correct,
375  * because _lock() is usually required, and b) hand code it:
376  * set_page_dirty_lock(), unpin_user_page().
377  *
378  */
379 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
380 				      bool make_dirty)
381 {
382 	unsigned long i;
383 	struct folio *folio;
384 	unsigned int nr;
385 
386 	for (i = 0; i < npages; i += nr) {
387 		folio = gup_folio_range_next(page, npages, i, &nr);
388 		if (make_dirty && !folio_test_dirty(folio)) {
389 			folio_lock(folio);
390 			folio_mark_dirty(folio);
391 			folio_unlock(folio);
392 		}
393 		gup_put_folio(folio, nr, FOLL_PIN);
394 	}
395 }
396 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
397 
398 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
399 {
400 	unsigned long i;
401 	struct folio *folio;
402 	unsigned int nr;
403 
404 	/*
405 	 * Don't perform any sanity checks because we might have raced with
406 	 * fork() and some anonymous pages might now actually be shared --
407 	 * which is why we're unpinning after all.
408 	 */
409 	for (i = 0; i < npages; i += nr) {
410 		folio = gup_folio_next(pages, npages, i, &nr);
411 		gup_put_folio(folio, nr, FOLL_PIN);
412 	}
413 }
414 
415 /**
416  * unpin_user_pages() - release an array of gup-pinned pages.
417  * @pages:  array of pages to be marked dirty and released.
418  * @npages: number of pages in the @pages array.
419  *
420  * For each page in the @pages array, release the page using unpin_user_page().
421  *
422  * Please see the unpin_user_page() documentation for details.
423  */
424 void unpin_user_pages(struct page **pages, unsigned long npages)
425 {
426 	unsigned long i;
427 	struct folio *folio;
428 	unsigned int nr;
429 
430 	/*
431 	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
432 	 * leaving them pinned), but probably not. More likely, gup/pup returned
433 	 * a hard -ERRNO error to the caller, who erroneously passed it here.
434 	 */
435 	if (WARN_ON(IS_ERR_VALUE(npages)))
436 		return;
437 
438 	sanity_check_pinned_pages(pages, npages);
439 	for (i = 0; i < npages; i += nr) {
440 		folio = gup_folio_next(pages, npages, i, &nr);
441 		gup_put_folio(folio, nr, FOLL_PIN);
442 	}
443 }
444 EXPORT_SYMBOL(unpin_user_pages);
445 
446 /*
447  * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
448  * lifecycle.  Avoid setting the bit unless necessary, or it might cause write
449  * cache bouncing on large SMP machines for concurrent pinned gups.
450  */
451 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
452 {
453 	if (!test_bit(MMF_HAS_PINNED, mm_flags))
454 		set_bit(MMF_HAS_PINNED, mm_flags);
455 }
456 
457 #ifdef CONFIG_MMU
458 static struct page *no_page_table(struct vm_area_struct *vma,
459 		unsigned int flags)
460 {
461 	/*
462 	 * When core dumping an enormous anonymous area that nobody
463 	 * has touched so far, we don't want to allocate unnecessary pages or
464 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
465 	 * then get_dump_page() will return NULL to leave a hole in the dump.
466 	 * But we can only make this optimization where a hole would surely
467 	 * be zero-filled if handle_mm_fault() actually did handle it.
468 	 */
469 	if ((flags & FOLL_DUMP) &&
470 			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
471 		return ERR_PTR(-EFAULT);
472 	return NULL;
473 }
474 
475 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
476 		pte_t *pte, unsigned int flags)
477 {
478 	if (flags & FOLL_TOUCH) {
479 		pte_t entry = *pte;
480 
481 		if (flags & FOLL_WRITE)
482 			entry = pte_mkdirty(entry);
483 		entry = pte_mkyoung(entry);
484 
485 		if (!pte_same(*pte, entry)) {
486 			set_pte_at(vma->vm_mm, address, pte, entry);
487 			update_mmu_cache(vma, address, pte);
488 		}
489 	}
490 
491 	/* Proper page table entry exists, but no corresponding struct page */
492 	return -EEXIST;
493 }
494 
495 /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
496 static inline bool can_follow_write_pte(pte_t pte, struct page *page,
497 					struct vm_area_struct *vma,
498 					unsigned int flags)
499 {
500 	/* If the pte is writable, we can write to the page. */
501 	if (pte_write(pte))
502 		return true;
503 
504 	/* Maybe FOLL_FORCE is set to override it? */
505 	if (!(flags & FOLL_FORCE))
506 		return false;
507 
508 	/* But FOLL_FORCE has no effect on shared mappings */
509 	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
510 		return false;
511 
512 	/* ... or read-only private ones */
513 	if (!(vma->vm_flags & VM_MAYWRITE))
514 		return false;
515 
516 	/* ... or already writable ones that just need to take a write fault */
517 	if (vma->vm_flags & VM_WRITE)
518 		return false;
519 
520 	/*
521 	 * See can_change_pte_writable(): we broke COW and could map the page
522 	 * writable if we have an exclusive anonymous page ...
523 	 */
524 	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
525 		return false;
526 
527 	/* ... and a write-fault isn't required for other reasons. */
528 	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
529 		return false;
530 	return !userfaultfd_pte_wp(vma, pte);
531 }
532 
533 static struct page *follow_page_pte(struct vm_area_struct *vma,
534 		unsigned long address, pmd_t *pmd, unsigned int flags,
535 		struct dev_pagemap **pgmap)
536 {
537 	struct mm_struct *mm = vma->vm_mm;
538 	struct page *page;
539 	spinlock_t *ptl;
540 	pte_t *ptep, pte;
541 	int ret;
542 
543 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
544 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
545 			 (FOLL_PIN | FOLL_GET)))
546 		return ERR_PTR(-EINVAL);
547 	if (unlikely(pmd_bad(*pmd)))
548 		return no_page_table(vma, flags);
549 
550 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
551 	pte = *ptep;
552 	if (!pte_present(pte))
553 		goto no_page;
554 	if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
555 		goto no_page;
556 
557 	page = vm_normal_page(vma, address, pte);
558 
559 	/*
560 	 * We only care about anon pages in can_follow_write_pte() and don't
561 	 * have to worry about pte_devmap() because they are never anon.
562 	 */
563 	if ((flags & FOLL_WRITE) &&
564 	    !can_follow_write_pte(pte, page, vma, flags)) {
565 		page = NULL;
566 		goto out;
567 	}
568 
569 	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
570 		/*
571 		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
572 		 * case since they are only valid while holding the pgmap
573 		 * reference.
574 		 */
575 		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
576 		if (*pgmap)
577 			page = pte_page(pte);
578 		else
579 			goto no_page;
580 	} else if (unlikely(!page)) {
581 		if (flags & FOLL_DUMP) {
582 			/* Avoid special (like zero) pages in core dumps */
583 			page = ERR_PTR(-EFAULT);
584 			goto out;
585 		}
586 
587 		if (is_zero_pfn(pte_pfn(pte))) {
588 			page = pte_page(pte);
589 		} else {
590 			ret = follow_pfn_pte(vma, address, ptep, flags);
591 			page = ERR_PTR(ret);
592 			goto out;
593 		}
594 	}
595 
596 	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
597 		page = ERR_PTR(-EMLINK);
598 		goto out;
599 	}
600 
601 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
602 		       !PageAnonExclusive(page), page);
603 
604 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
605 	ret = try_grab_page(page, flags);
606 	if (unlikely(ret)) {
607 		page = ERR_PTR(ret);
608 		goto out;
609 	}
610 
611 	/*
612 	 * We need to make the page accessible if and only if we are going
613 	 * to access its content (the FOLL_PIN case).  Please see
614 	 * Documentation/core-api/pin_user_pages.rst for details.
615 	 */
616 	if (flags & FOLL_PIN) {
617 		ret = arch_make_page_accessible(page);
618 		if (ret) {
619 			unpin_user_page(page);
620 			page = ERR_PTR(ret);
621 			goto out;
622 		}
623 	}
624 	if (flags & FOLL_TOUCH) {
625 		if ((flags & FOLL_WRITE) &&
626 		    !pte_dirty(pte) && !PageDirty(page))
627 			set_page_dirty(page);
628 		/*
629 		 * pte_mkyoung() would be more correct here, but atomic care
630 		 * is needed to avoid losing the dirty bit: it is easier to use
631 		 * mark_page_accessed().
632 		 */
633 		mark_page_accessed(page);
634 	}
635 out:
636 	pte_unmap_unlock(ptep, ptl);
637 	return page;
638 no_page:
639 	pte_unmap_unlock(ptep, ptl);
640 	if (!pte_none(pte))
641 		return NULL;
642 	return no_page_table(vma, flags);
643 }
644 
645 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
646 				    unsigned long address, pud_t *pudp,
647 				    unsigned int flags,
648 				    struct follow_page_context *ctx)
649 {
650 	pmd_t *pmd, pmdval;
651 	spinlock_t *ptl;
652 	struct page *page;
653 	struct mm_struct *mm = vma->vm_mm;
654 
655 	pmd = pmd_offset(pudp, address);
656 	/*
657 	 * The READ_ONCE() will stabilize the pmdval in a register or
658 	 * on the stack so that it will stop changing under the code.
659 	 */
660 	pmdval = READ_ONCE(*pmd);
661 	if (pmd_none(pmdval))
662 		return no_page_table(vma, flags);
663 	if (!pmd_present(pmdval))
664 		return no_page_table(vma, flags);
665 	if (pmd_devmap(pmdval)) {
666 		ptl = pmd_lock(mm, pmd);
667 		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
668 		spin_unlock(ptl);
669 		if (page)
670 			return page;
671 	}
672 	if (likely(!pmd_trans_huge(pmdval)))
673 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
674 
675 	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
676 		return no_page_table(vma, flags);
677 
678 	ptl = pmd_lock(mm, pmd);
679 	if (unlikely(!pmd_present(*pmd))) {
680 		spin_unlock(ptl);
681 		return no_page_table(vma, flags);
682 	}
683 	if (unlikely(!pmd_trans_huge(*pmd))) {
684 		spin_unlock(ptl);
685 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
686 	}
687 	if (flags & FOLL_SPLIT_PMD) {
688 		int ret;
689 		page = pmd_page(*pmd);
690 		if (is_huge_zero_page(page)) {
691 			spin_unlock(ptl);
692 			ret = 0;
693 			split_huge_pmd(vma, pmd, address);
694 			if (pmd_trans_unstable(pmd))
695 				ret = -EBUSY;
696 		} else {
697 			spin_unlock(ptl);
698 			split_huge_pmd(vma, pmd, address);
699 			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
700 		}
701 
702 		return ret ? ERR_PTR(ret) :
703 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
704 	}
705 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
706 	spin_unlock(ptl);
707 	ctx->page_mask = HPAGE_PMD_NR - 1;
708 	return page;
709 }
710 
711 static struct page *follow_pud_mask(struct vm_area_struct *vma,
712 				    unsigned long address, p4d_t *p4dp,
713 				    unsigned int flags,
714 				    struct follow_page_context *ctx)
715 {
716 	pud_t *pud;
717 	spinlock_t *ptl;
718 	struct page *page;
719 	struct mm_struct *mm = vma->vm_mm;
720 
721 	pud = pud_offset(p4dp, address);
722 	if (pud_none(*pud))
723 		return no_page_table(vma, flags);
724 	if (pud_devmap(*pud)) {
725 		ptl = pud_lock(mm, pud);
726 		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
727 		spin_unlock(ptl);
728 		if (page)
729 			return page;
730 	}
731 	if (unlikely(pud_bad(*pud)))
732 		return no_page_table(vma, flags);
733 
734 	return follow_pmd_mask(vma, address, pud, flags, ctx);
735 }
736 
737 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
738 				    unsigned long address, pgd_t *pgdp,
739 				    unsigned int flags,
740 				    struct follow_page_context *ctx)
741 {
742 	p4d_t *p4d;
743 
744 	p4d = p4d_offset(pgdp, address);
745 	if (p4d_none(*p4d))
746 		return no_page_table(vma, flags);
747 	BUILD_BUG_ON(p4d_huge(*p4d));
748 	if (unlikely(p4d_bad(*p4d)))
749 		return no_page_table(vma, flags);
750 
751 	return follow_pud_mask(vma, address, p4d, flags, ctx);
752 }
753 
754 /**
755  * follow_page_mask - look up a page descriptor from a user-virtual address
756  * @vma: vm_area_struct mapping @address
757  * @address: virtual address to look up
758  * @flags: flags modifying lookup behaviour
759  * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
760  *       pointer to output page_mask
761  *
762  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
763  *
764  * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
765  * the device's dev_pagemap metadata to avoid repeating expensive lookups.
766  *
767  * When getting an anonymous page and the caller has to trigger unsharing
768  * of a shared anonymous page first, -EMLINK is returned. The caller should
769  * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
770  * relevant with FOLL_PIN and !FOLL_WRITE.
771  *
772  * On output, the @ctx->page_mask is set according to the size of the page.
773  *
774  * Return: the mapped (struct page *), %NULL if no mapping exists, or
775  * an error pointer if there is a mapping to something not represented
776  * by a page descriptor (see also vm_normal_page()).
777  */
778 static struct page *follow_page_mask(struct vm_area_struct *vma,
779 			      unsigned long address, unsigned int flags,
780 			      struct follow_page_context *ctx)
781 {
782 	pgd_t *pgd;
783 	struct page *page;
784 	struct mm_struct *mm = vma->vm_mm;
785 
786 	ctx->page_mask = 0;
787 
788 	/*
789 	 * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
790 	 * special hugetlb page table walking code.  This eliminates the
791 	 * need to check for hugetlb entries in the general walking code.
792 	 *
793 	 * hugetlb_follow_page_mask is only for follow_page() handling here.
794 	 * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
795 	 */
796 	if (is_vm_hugetlb_page(vma)) {
797 		page = hugetlb_follow_page_mask(vma, address, flags);
798 		if (!page)
799 			page = no_page_table(vma, flags);
800 		return page;
801 	}
802 
803 	pgd = pgd_offset(mm, address);
804 
805 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
806 		return no_page_table(vma, flags);
807 
808 	return follow_p4d_mask(vma, address, pgd, flags, ctx);
809 }
810 
811 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
812 			 unsigned int foll_flags)
813 {
814 	struct follow_page_context ctx = { NULL };
815 	struct page *page;
816 
817 	if (vma_is_secretmem(vma))
818 		return NULL;
819 
820 	if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
821 		return NULL;
822 
823 	page = follow_page_mask(vma, address, foll_flags, &ctx);
824 	if (ctx.pgmap)
825 		put_dev_pagemap(ctx.pgmap);
826 	return page;
827 }
828 
829 static int get_gate_page(struct mm_struct *mm, unsigned long address,
830 		unsigned int gup_flags, struct vm_area_struct **vma,
831 		struct page **page)
832 {
833 	pgd_t *pgd;
834 	p4d_t *p4d;
835 	pud_t *pud;
836 	pmd_t *pmd;
837 	pte_t *pte;
838 	int ret = -EFAULT;
839 
840 	/* user gate pages are read-only */
841 	if (gup_flags & FOLL_WRITE)
842 		return -EFAULT;
843 	if (address > TASK_SIZE)
844 		pgd = pgd_offset_k(address);
845 	else
846 		pgd = pgd_offset_gate(mm, address);
847 	if (pgd_none(*pgd))
848 		return -EFAULT;
849 	p4d = p4d_offset(pgd, address);
850 	if (p4d_none(*p4d))
851 		return -EFAULT;
852 	pud = pud_offset(p4d, address);
853 	if (pud_none(*pud))
854 		return -EFAULT;
855 	pmd = pmd_offset(pud, address);
856 	if (!pmd_present(*pmd))
857 		return -EFAULT;
858 	VM_BUG_ON(pmd_trans_huge(*pmd));
859 	pte = pte_offset_map(pmd, address);
860 	if (pte_none(*pte))
861 		goto unmap;
862 	*vma = get_gate_vma(mm);
863 	if (!page)
864 		goto out;
865 	*page = vm_normal_page(*vma, address, *pte);
866 	if (!*page) {
867 		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
868 			goto unmap;
869 		*page = pte_page(*pte);
870 	}
871 	ret = try_grab_page(*page, gup_flags);
872 	if (unlikely(ret))
873 		goto unmap;
874 out:
875 	ret = 0;
876 unmap:
877 	pte_unmap(pte);
878 	return ret;
879 }
880 
881 /*
882  * mmap_lock must be held on entry.  If @flags has FOLL_UNLOCKABLE but not
883  * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
884  * to 0 and -EBUSY returned.
885  */
886 static int faultin_page(struct vm_area_struct *vma,
887 		unsigned long address, unsigned int *flags, bool unshare,
888 		int *locked)
889 {
890 	unsigned int fault_flags = 0;
891 	vm_fault_t ret;
892 
893 	if (*flags & FOLL_NOFAULT)
894 		return -EFAULT;
895 	if (*flags & FOLL_WRITE)
896 		fault_flags |= FAULT_FLAG_WRITE;
897 	if (*flags & FOLL_REMOTE)
898 		fault_flags |= FAULT_FLAG_REMOTE;
899 	if (*flags & FOLL_UNLOCKABLE) {
900 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
901 		/*
902 		 * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
903 		 * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
904 		 * That's because some callers may not be prepared to
905 		 * handle early exits caused by non-fatal signals.
906 		 */
907 		if (*flags & FOLL_INTERRUPTIBLE)
908 			fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
909 	}
910 	if (*flags & FOLL_NOWAIT)
911 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
912 	if (*flags & FOLL_TRIED) {
913 		/*
914 		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
915 		 * can co-exist
916 		 */
917 		fault_flags |= FAULT_FLAG_TRIED;
918 	}
919 	if (unshare) {
920 		fault_flags |= FAULT_FLAG_UNSHARE;
921 		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
922 		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
923 	}
924 
925 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
926 
927 	if (ret & VM_FAULT_COMPLETED) {
928 		/*
929 		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
930 		 * mmap lock in the page fault handler. Sanity check this.
931 		 */
932 		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
933 		*locked = 0;
934 
935 		/*
936 		 * We should do the same as VM_FAULT_RETRY, but let's not
937 		 * return -EBUSY since that's not reflecting the reality of
938 		 * what has happened - we've just fully completed a page
939 		 * fault, with the mmap lock released.  Use -EAGAIN to show
940 		 * that we want to take the mmap lock _again_.
941 		 */
942 		return -EAGAIN;
943 	}
944 
945 	if (ret & VM_FAULT_ERROR) {
946 		int err = vm_fault_to_errno(ret, *flags);
947 
948 		if (err)
949 			return err;
950 		BUG();
951 	}
952 
953 	if (ret & VM_FAULT_RETRY) {
954 		if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
955 			*locked = 0;
956 		return -EBUSY;
957 	}
958 
959 	return 0;
960 }
961 
962 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
963 {
964 	vm_flags_t vm_flags = vma->vm_flags;
965 	int write = (gup_flags & FOLL_WRITE);
966 	int foreign = (gup_flags & FOLL_REMOTE);
967 
968 	if (vm_flags & (VM_IO | VM_PFNMAP))
969 		return -EFAULT;
970 
971 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
972 		return -EFAULT;
973 
974 	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
975 		return -EOPNOTSUPP;
976 
977 	if (vma_is_secretmem(vma))
978 		return -EFAULT;
979 
980 	if (write) {
981 		if (!(vm_flags & VM_WRITE)) {
982 			if (!(gup_flags & FOLL_FORCE))
983 				return -EFAULT;
984 			/* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
985 			if (is_vm_hugetlb_page(vma))
986 				return -EFAULT;
987 			/*
988 			 * We used to let the write,force case do COW in a
989 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
990 			 * set a breakpoint in a read-only mapping of an
991 			 * executable, without corrupting the file (yet only
992 			 * when that file had been opened for writing!).
993 			 * Anon pages in shared mappings are surprising: now
994 			 * just reject it.
995 			 */
996 			if (!is_cow_mapping(vm_flags))
997 				return -EFAULT;
998 		}
999 	} else if (!(vm_flags & VM_READ)) {
1000 		if (!(gup_flags & FOLL_FORCE))
1001 			return -EFAULT;
1002 		/*
1003 		 * Is there actually any vma we can reach here which does not
1004 		 * have VM_MAYREAD set?
1005 		 */
1006 		if (!(vm_flags & VM_MAYREAD))
1007 			return -EFAULT;
1008 	}
1009 	/*
1010 	 * gups are always data accesses, not instruction
1011 	 * fetches, so execute=false here
1012 	 */
1013 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1014 		return -EFAULT;
1015 	return 0;
1016 }
1017 
1018 /**
1019  * __get_user_pages() - pin user pages in memory
1020  * @mm:		mm_struct of target mm
1021  * @start:	starting user address
1022  * @nr_pages:	number of pages from start to pin
1023  * @gup_flags:	flags modifying pin behaviour
1024  * @pages:	array that receives pointers to the pages pinned.
1025  *		Should be at least nr_pages long. Or NULL, if caller
1026  *		only intends to ensure the pages are faulted in.
1027  * @locked:     whether we're still with the mmap_lock held
1028  *
1029  * Returns either number of pages pinned (which may be less than the
1030  * number requested), or an error. Details about the return value:
1031  *
1032  * -- If nr_pages is 0, returns 0.
1033  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1034  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1035  *    pages pinned. Again, this may be less than nr_pages.
1036  * -- 0 return value is possible when the fault would need to be retried.
1037  *
1038  * The caller is responsible for releasing returned @pages, via put_page().
1039  *
1040  * Must be called with mmap_lock held.  It may be released.  See below.
1041  *
1042  * __get_user_pages walks a process's page tables and takes a reference to
1043  * each struct page that each user address corresponds to at a given
1044  * instant. That is, it takes the page that would be accessed if a user
1045  * thread accesses the given user virtual address at that instant.
1046  *
1047  * This does not guarantee that the page exists in the user mappings when
1048  * __get_user_pages returns, and there may even be a completely different
1049  * page there in some cases (eg. if mmapped pagecache has been invalidated
1050  * and subsequently re-faulted). However it does guarantee that the page
1051  * won't be freed completely. And mostly callers simply care that the page
1052  * contains data that was valid *at some point in time*. Typically, an IO
1053  * or similar operation cannot guarantee anything stronger anyway because
1054  * locks can't be held over the syscall boundary.
1055  *
1056  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1057  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1058  * appropriate) must be called after the page is finished with, and
1059  * before put_page is called.
1060  *
1061  * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
1062  * be released. If this happens *@locked will be set to 0 on return.
1063  *
1064  * A caller using such a combination of @gup_flags must therefore hold the
1065  * mmap_lock for reading only, and recognize when it's been released. Otherwise,
1066  * it must be held for either reading or writing and will not be released.
1067  *
1068  * In most cases, get_user_pages or get_user_pages_fast should be used
1069  * instead of __get_user_pages. __get_user_pages should be used only if
1070  * you need some special @gup_flags.
1071  */
1072 static long __get_user_pages(struct mm_struct *mm,
1073 		unsigned long start, unsigned long nr_pages,
1074 		unsigned int gup_flags, struct page **pages,
1075 		int *locked)
1076 {
1077 	long ret = 0, i = 0;
1078 	struct vm_area_struct *vma = NULL;
1079 	struct follow_page_context ctx = { NULL };
1080 
1081 	if (!nr_pages)
1082 		return 0;
1083 
1084 	start = untagged_addr_remote(mm, start);
1085 
1086 	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1087 
1088 	do {
1089 		struct page *page;
1090 		unsigned int foll_flags = gup_flags;
1091 		unsigned int page_increm;
1092 
1093 		/* first iteration or cross vma bound */
1094 		if (!vma || start >= vma->vm_end) {
1095 			vma = find_extend_vma(mm, start);
1096 			if (!vma && in_gate_area(mm, start)) {
1097 				ret = get_gate_page(mm, start & PAGE_MASK,
1098 						gup_flags, &vma,
1099 						pages ? &pages[i] : NULL);
1100 				if (ret)
1101 					goto out;
1102 				ctx.page_mask = 0;
1103 				goto next_page;
1104 			}
1105 
1106 			if (!vma) {
1107 				ret = -EFAULT;
1108 				goto out;
1109 			}
1110 			ret = check_vma_flags(vma, gup_flags);
1111 			if (ret)
1112 				goto out;
1113 
1114 			if (is_vm_hugetlb_page(vma)) {
1115 				i = follow_hugetlb_page(mm, vma, pages,
1116 							&start, &nr_pages, i,
1117 							gup_flags, locked);
1118 				if (!*locked) {
1119 					/*
1120 					 * We've got a VM_FAULT_RETRY
1121 					 * and we've lost mmap_lock.
1122 					 * We must stop here.
1123 					 */
1124 					BUG_ON(gup_flags & FOLL_NOWAIT);
1125 					goto out;
1126 				}
1127 				continue;
1128 			}
1129 		}
1130 retry:
1131 		/*
1132 		 * If we have a pending SIGKILL, don't keep faulting pages and
1133 		 * potentially allocating memory.
1134 		 */
1135 		if (fatal_signal_pending(current)) {
1136 			ret = -EINTR;
1137 			goto out;
1138 		}
1139 		cond_resched();
1140 
1141 		page = follow_page_mask(vma, start, foll_flags, &ctx);
1142 		if (!page || PTR_ERR(page) == -EMLINK) {
1143 			ret = faultin_page(vma, start, &foll_flags,
1144 					   PTR_ERR(page) == -EMLINK, locked);
1145 			switch (ret) {
1146 			case 0:
1147 				goto retry;
1148 			case -EBUSY:
1149 			case -EAGAIN:
1150 				ret = 0;
1151 				fallthrough;
1152 			case -EFAULT:
1153 			case -ENOMEM:
1154 			case -EHWPOISON:
1155 				goto out;
1156 			}
1157 			BUG();
1158 		} else if (PTR_ERR(page) == -EEXIST) {
1159 			/*
1160 			 * Proper page table entry exists, but no corresponding
1161 			 * struct page. If the caller expects **pages to be
1162 			 * filled in, bail out now, because that can't be done
1163 			 * for this page.
1164 			 */
1165 			if (pages) {
1166 				ret = PTR_ERR(page);
1167 				goto out;
1168 			}
1169 
1170 			goto next_page;
1171 		} else if (IS_ERR(page)) {
1172 			ret = PTR_ERR(page);
1173 			goto out;
1174 		}
1175 		if (pages) {
1176 			pages[i] = page;
1177 			flush_anon_page(vma, page, start);
1178 			flush_dcache_page(page);
1179 			ctx.page_mask = 0;
1180 		}
1181 next_page:
1182 		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1183 		if (page_increm > nr_pages)
1184 			page_increm = nr_pages;
1185 		i += page_increm;
1186 		start += page_increm * PAGE_SIZE;
1187 		nr_pages -= page_increm;
1188 	} while (nr_pages);
1189 out:
1190 	if (ctx.pgmap)
1191 		put_dev_pagemap(ctx.pgmap);
1192 	return i ? i : ret;
1193 }
1194 
1195 static bool vma_permits_fault(struct vm_area_struct *vma,
1196 			      unsigned int fault_flags)
1197 {
1198 	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
1199 	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1200 	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1201 
1202 	if (!(vm_flags & vma->vm_flags))
1203 		return false;
1204 
1205 	/*
1206 	 * The architecture might have a hardware protection
1207 	 * mechanism other than read/write that can deny access.
1208 	 *
1209 	 * gup always represents data access, not instruction
1210 	 * fetches, so execute=false here:
1211 	 */
1212 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1213 		return false;
1214 
1215 	return true;
1216 }
1217 
1218 /**
1219  * fixup_user_fault() - manually resolve a user page fault
1220  * @mm:		mm_struct of target mm
1221  * @address:	user address
1222  * @fault_flags:flags to pass down to handle_mm_fault()
1223  * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
1224  *		does not allow retry. If NULL, the caller must guarantee
1225  *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1226  *
1227  * This is meant to be called in the specific scenario where for locking reasons
1228  * we try to access user memory in atomic context (within a pagefault_disable()
1229  * section), this returns -EFAULT, and we want to resolve the user fault before
1230  * trying again.
1231  *
1232  * Typically this is meant to be used by the futex code.
1233  *
1234  * The main difference with get_user_pages() is that this function will
1235  * unconditionally call handle_mm_fault() which will in turn perform all the
1236  * necessary SW fixup of the dirty and young bits in the PTE, while
1237  * get_user_pages() only guarantees to update these in the struct page.
1238  *
1239  * This is important for some architectures where those bits also gate the
1240  * access permission to the page because they are maintained in software.  On
1241  * such architectures, gup() will not be enough to make a subsequent access
1242  * succeed.
1243  *
1244  * This function will not return with an unlocked mmap_lock. So it has not the
1245  * same semantics wrt the @mm->mmap_lock as does filemap_fault().
1246  */
1247 int fixup_user_fault(struct mm_struct *mm,
1248 		     unsigned long address, unsigned int fault_flags,
1249 		     bool *unlocked)
1250 {
1251 	struct vm_area_struct *vma;
1252 	vm_fault_t ret;
1253 
1254 	address = untagged_addr_remote(mm, address);
1255 
1256 	if (unlocked)
1257 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1258 
1259 retry:
1260 	vma = find_extend_vma(mm, address);
1261 	if (!vma || address < vma->vm_start)
1262 		return -EFAULT;
1263 
1264 	if (!vma_permits_fault(vma, fault_flags))
1265 		return -EFAULT;
1266 
1267 	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1268 	    fatal_signal_pending(current))
1269 		return -EINTR;
1270 
1271 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
1272 
1273 	if (ret & VM_FAULT_COMPLETED) {
1274 		/*
1275 		 * NOTE: it's a pity that we need to retake the lock here
1276 		 * to pair with the unlock() in the callers. Ideally we
1277 		 * could tell the callers so they do not need to unlock.
1278 		 */
1279 		mmap_read_lock(mm);
1280 		*unlocked = true;
1281 		return 0;
1282 	}
1283 
1284 	if (ret & VM_FAULT_ERROR) {
1285 		int err = vm_fault_to_errno(ret, 0);
1286 
1287 		if (err)
1288 			return err;
1289 		BUG();
1290 	}
1291 
1292 	if (ret & VM_FAULT_RETRY) {
1293 		mmap_read_lock(mm);
1294 		*unlocked = true;
1295 		fault_flags |= FAULT_FLAG_TRIED;
1296 		goto retry;
1297 	}
1298 
1299 	return 0;
1300 }
1301 EXPORT_SYMBOL_GPL(fixup_user_fault);
1302 
1303 /*
1304  * GUP always responds to fatal signals.  When FOLL_INTERRUPTIBLE is
1305  * specified, it'll also respond to generic signals.  The caller of GUP
1306  * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
1307  */
1308 static bool gup_signal_pending(unsigned int flags)
1309 {
1310 	if (fatal_signal_pending(current))
1311 		return true;
1312 
1313 	if (!(flags & FOLL_INTERRUPTIBLE))
1314 		return false;
1315 
1316 	return signal_pending(current);
1317 }
1318 
1319 /*
1320  * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
1321  * the caller. This function may drop the mmap_lock. If it does so, then it will
1322  * set (*locked = 0).
1323  *
1324  * (*locked == 0) means that the caller expects this function to acquire and
1325  * drop the mmap_lock. Therefore, the value of *locked will still be zero when
1326  * the function returns, even though it may have changed temporarily during
1327  * function execution.
1328  *
1329  * Please note that this function, unlike __get_user_pages(), will not return 0
1330  * for nr_pages > 0, unless FOLL_NOWAIT is used.
1331  */
1332 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1333 						unsigned long start,
1334 						unsigned long nr_pages,
1335 						struct page **pages,
1336 						int *locked,
1337 						unsigned int flags)
1338 {
1339 	long ret, pages_done;
1340 	bool must_unlock = false;
1341 
1342 	/*
1343 	 * The internal caller expects GUP to manage the lock internally and the
1344 	 * lock must be released when this returns.
1345 	 */
1346 	if (!*locked) {
1347 		if (mmap_read_lock_killable(mm))
1348 			return -EAGAIN;
1349 		must_unlock = true;
1350 		*locked = 1;
1351 	}
1352 	else
1353 		mmap_assert_locked(mm);
1354 
1355 	if (flags & FOLL_PIN)
1356 		mm_set_has_pinned_flag(&mm->flags);
1357 
1358 	/*
1359 	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1360 	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1361 	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1362 	 * for FOLL_GET, not for the newer FOLL_PIN.
1363 	 *
1364 	 * FOLL_PIN always expects pages to be non-null, but no need to assert
1365 	 * that here, as any failures will be obvious enough.
1366 	 */
1367 	if (pages && !(flags & FOLL_PIN))
1368 		flags |= FOLL_GET;
1369 
1370 	pages_done = 0;
1371 	for (;;) {
1372 		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1373 				       locked);
1374 		if (!(flags & FOLL_UNLOCKABLE)) {
1375 			/* VM_FAULT_RETRY couldn't trigger, bypass */
1376 			pages_done = ret;
1377 			break;
1378 		}
1379 
1380 		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
1381 		if (!*locked) {
1382 			BUG_ON(ret < 0);
1383 			BUG_ON(ret >= nr_pages);
1384 		}
1385 
1386 		if (ret > 0) {
1387 			nr_pages -= ret;
1388 			pages_done += ret;
1389 			if (!nr_pages)
1390 				break;
1391 		}
1392 		if (*locked) {
1393 			/*
1394 			 * VM_FAULT_RETRY didn't trigger or it was a
1395 			 * FOLL_NOWAIT.
1396 			 */
1397 			if (!pages_done)
1398 				pages_done = ret;
1399 			break;
1400 		}
1401 		/*
1402 		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1403 		 * For the prefault case (!pages) we only update counts.
1404 		 */
1405 		if (likely(pages))
1406 			pages += ret;
1407 		start += ret << PAGE_SHIFT;
1408 
1409 		/* The lock was temporarily dropped, so we must unlock later */
1410 		must_unlock = true;
1411 
1412 retry:
1413 		/*
1414 		 * Repeat on the address that fired VM_FAULT_RETRY
1415 		 * with both FAULT_FLAG_ALLOW_RETRY and
1416 		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1417 		 * by fatal signals of even common signals, depending on
1418 		 * the caller's request. So we need to check it before we
1419 		 * start trying again otherwise it can loop forever.
1420 		 */
1421 		if (gup_signal_pending(flags)) {
1422 			if (!pages_done)
1423 				pages_done = -EINTR;
1424 			break;
1425 		}
1426 
1427 		ret = mmap_read_lock_killable(mm);
1428 		if (ret) {
1429 			BUG_ON(ret > 0);
1430 			if (!pages_done)
1431 				pages_done = ret;
1432 			break;
1433 		}
1434 
1435 		*locked = 1;
1436 		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1437 				       pages, locked);
1438 		if (!*locked) {
1439 			/* Continue to retry until we succeeded */
1440 			BUG_ON(ret != 0);
1441 			goto retry;
1442 		}
1443 		if (ret != 1) {
1444 			BUG_ON(ret > 1);
1445 			if (!pages_done)
1446 				pages_done = ret;
1447 			break;
1448 		}
1449 		nr_pages--;
1450 		pages_done++;
1451 		if (!nr_pages)
1452 			break;
1453 		if (likely(pages))
1454 			pages++;
1455 		start += PAGE_SIZE;
1456 	}
1457 	if (must_unlock && *locked) {
1458 		/*
1459 		 * We either temporarily dropped the lock, or the caller
1460 		 * requested that we both acquire and drop the lock. Either way,
1461 		 * we must now unlock, and notify the caller of that state.
1462 		 */
1463 		mmap_read_unlock(mm);
1464 		*locked = 0;
1465 	}
1466 	return pages_done;
1467 }
1468 
1469 /**
1470  * populate_vma_page_range() -  populate a range of pages in the vma.
1471  * @vma:   target vma
1472  * @start: start address
1473  * @end:   end address
1474  * @locked: whether the mmap_lock is still held
1475  *
1476  * This takes care of mlocking the pages too if VM_LOCKED is set.
1477  *
1478  * Return either number of pages pinned in the vma, or a negative error
1479  * code on error.
1480  *
1481  * vma->vm_mm->mmap_lock must be held.
1482  *
1483  * If @locked is NULL, it may be held for read or write and will
1484  * be unperturbed.
1485  *
1486  * If @locked is non-NULL, it must held for read only and may be
1487  * released.  If it's released, *@locked will be set to 0.
1488  */
1489 long populate_vma_page_range(struct vm_area_struct *vma,
1490 		unsigned long start, unsigned long end, int *locked)
1491 {
1492 	struct mm_struct *mm = vma->vm_mm;
1493 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1494 	int local_locked = 1;
1495 	int gup_flags;
1496 	long ret;
1497 
1498 	VM_BUG_ON(!PAGE_ALIGNED(start));
1499 	VM_BUG_ON(!PAGE_ALIGNED(end));
1500 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1501 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1502 	mmap_assert_locked(mm);
1503 
1504 	/*
1505 	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1506 	 * faultin_page() to break COW, so it has no work to do here.
1507 	 */
1508 	if (vma->vm_flags & VM_LOCKONFAULT)
1509 		return nr_pages;
1510 
1511 	gup_flags = FOLL_TOUCH;
1512 	/*
1513 	 * We want to touch writable mappings with a write fault in order
1514 	 * to break COW, except for shared mappings because these don't COW
1515 	 * and we would not want to dirty them for nothing.
1516 	 */
1517 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1518 		gup_flags |= FOLL_WRITE;
1519 
1520 	/*
1521 	 * We want mlock to succeed for regions that have any permissions
1522 	 * other than PROT_NONE.
1523 	 */
1524 	if (vma_is_accessible(vma))
1525 		gup_flags |= FOLL_FORCE;
1526 
1527 	if (locked)
1528 		gup_flags |= FOLL_UNLOCKABLE;
1529 
1530 	/*
1531 	 * We made sure addr is within a VMA, so the following will
1532 	 * not result in a stack expansion that recurses back here.
1533 	 */
1534 	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1535 			       NULL, locked ? locked : &local_locked);
1536 	lru_add_drain();
1537 	return ret;
1538 }
1539 
1540 /*
1541  * faultin_vma_page_range() - populate (prefault) page tables inside the
1542  *			      given VMA range readable/writable
1543  *
1544  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1545  *
1546  * @vma: target vma
1547  * @start: start address
1548  * @end: end address
1549  * @write: whether to prefault readable or writable
1550  * @locked: whether the mmap_lock is still held
1551  *
1552  * Returns either number of processed pages in the vma, or a negative error
1553  * code on error (see __get_user_pages()).
1554  *
1555  * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1556  * covered by the VMA. If it's released, *@locked will be set to 0.
1557  */
1558 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1559 			    unsigned long end, bool write, int *locked)
1560 {
1561 	struct mm_struct *mm = vma->vm_mm;
1562 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1563 	int gup_flags;
1564 	long ret;
1565 
1566 	VM_BUG_ON(!PAGE_ALIGNED(start));
1567 	VM_BUG_ON(!PAGE_ALIGNED(end));
1568 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1569 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
1570 	mmap_assert_locked(mm);
1571 
1572 	/*
1573 	 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1574 	 *	       the page dirty with FOLL_WRITE -- which doesn't make a
1575 	 *	       difference with !FOLL_FORCE, because the page is writable
1576 	 *	       in the page table.
1577 	 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1578 	 *		  a poisoned page.
1579 	 * !FOLL_FORCE: Require proper access permissions.
1580 	 */
1581 	gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
1582 	if (write)
1583 		gup_flags |= FOLL_WRITE;
1584 
1585 	/*
1586 	 * We want to report -EINVAL instead of -EFAULT for any permission
1587 	 * problems or incompatible mappings.
1588 	 */
1589 	if (check_vma_flags(vma, gup_flags))
1590 		return -EINVAL;
1591 
1592 	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1593 			       NULL, locked);
1594 	lru_add_drain();
1595 	return ret;
1596 }
1597 
1598 /*
1599  * __mm_populate - populate and/or mlock pages within a range of address space.
1600  *
1601  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1602  * flags. VMAs must be already marked with the desired vm_flags, and
1603  * mmap_lock must not be held.
1604  */
1605 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1606 {
1607 	struct mm_struct *mm = current->mm;
1608 	unsigned long end, nstart, nend;
1609 	struct vm_area_struct *vma = NULL;
1610 	int locked = 0;
1611 	long ret = 0;
1612 
1613 	end = start + len;
1614 
1615 	for (nstart = start; nstart < end; nstart = nend) {
1616 		/*
1617 		 * We want to fault in pages for [nstart; end) address range.
1618 		 * Find first corresponding VMA.
1619 		 */
1620 		if (!locked) {
1621 			locked = 1;
1622 			mmap_read_lock(mm);
1623 			vma = find_vma_intersection(mm, nstart, end);
1624 		} else if (nstart >= vma->vm_end)
1625 			vma = find_vma_intersection(mm, vma->vm_end, end);
1626 
1627 		if (!vma)
1628 			break;
1629 		/*
1630 		 * Set [nstart; nend) to intersection of desired address
1631 		 * range with the first VMA. Also, skip undesirable VMA types.
1632 		 */
1633 		nend = min(end, vma->vm_end);
1634 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1635 			continue;
1636 		if (nstart < vma->vm_start)
1637 			nstart = vma->vm_start;
1638 		/*
1639 		 * Now fault in a range of pages. populate_vma_page_range()
1640 		 * double checks the vma flags, so that it won't mlock pages
1641 		 * if the vma was already munlocked.
1642 		 */
1643 		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1644 		if (ret < 0) {
1645 			if (ignore_errors) {
1646 				ret = 0;
1647 				continue;	/* continue at next VMA */
1648 			}
1649 			break;
1650 		}
1651 		nend = nstart + ret * PAGE_SIZE;
1652 		ret = 0;
1653 	}
1654 	if (locked)
1655 		mmap_read_unlock(mm);
1656 	return ret;	/* 0 or negative error code */
1657 }
1658 #else /* CONFIG_MMU */
1659 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1660 		unsigned long nr_pages, struct page **pages,
1661 		int *locked, unsigned int foll_flags)
1662 {
1663 	struct vm_area_struct *vma;
1664 	bool must_unlock = false;
1665 	unsigned long vm_flags;
1666 	long i;
1667 
1668 	if (!nr_pages)
1669 		return 0;
1670 
1671 	/*
1672 	 * The internal caller expects GUP to manage the lock internally and the
1673 	 * lock must be released when this returns.
1674 	 */
1675 	if (!*locked) {
1676 		if (mmap_read_lock_killable(mm))
1677 			return -EAGAIN;
1678 		must_unlock = true;
1679 		*locked = 1;
1680 	}
1681 
1682 	/* calculate required read or write permissions.
1683 	 * If FOLL_FORCE is set, we only require the "MAY" flags.
1684 	 */
1685 	vm_flags  = (foll_flags & FOLL_WRITE) ?
1686 			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1687 	vm_flags &= (foll_flags & FOLL_FORCE) ?
1688 			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1689 
1690 	for (i = 0; i < nr_pages; i++) {
1691 		vma = find_vma(mm, start);
1692 		if (!vma)
1693 			break;
1694 
1695 		/* protect what we can, including chardevs */
1696 		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1697 		    !(vm_flags & vma->vm_flags))
1698 			break;
1699 
1700 		if (pages) {
1701 			pages[i] = virt_to_page((void *)start);
1702 			if (pages[i])
1703 				get_page(pages[i]);
1704 		}
1705 
1706 		start = (start + PAGE_SIZE) & PAGE_MASK;
1707 	}
1708 
1709 	if (must_unlock && *locked) {
1710 		mmap_read_unlock(mm);
1711 		*locked = 0;
1712 	}
1713 
1714 	return i ? : -EFAULT;
1715 }
1716 #endif /* !CONFIG_MMU */
1717 
1718 /**
1719  * fault_in_writeable - fault in userspace address range for writing
1720  * @uaddr: start of address range
1721  * @size: size of address range
1722  *
1723  * Returns the number of bytes not faulted in (like copy_to_user() and
1724  * copy_from_user()).
1725  */
1726 size_t fault_in_writeable(char __user *uaddr, size_t size)
1727 {
1728 	char __user *start = uaddr, *end;
1729 
1730 	if (unlikely(size == 0))
1731 		return 0;
1732 	if (!user_write_access_begin(uaddr, size))
1733 		return size;
1734 	if (!PAGE_ALIGNED(uaddr)) {
1735 		unsafe_put_user(0, uaddr, out);
1736 		uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1737 	}
1738 	end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1739 	if (unlikely(end < start))
1740 		end = NULL;
1741 	while (uaddr != end) {
1742 		unsafe_put_user(0, uaddr, out);
1743 		uaddr += PAGE_SIZE;
1744 	}
1745 
1746 out:
1747 	user_write_access_end();
1748 	if (size > uaddr - start)
1749 		return size - (uaddr - start);
1750 	return 0;
1751 }
1752 EXPORT_SYMBOL(fault_in_writeable);
1753 
1754 /**
1755  * fault_in_subpage_writeable - fault in an address range for writing
1756  * @uaddr: start of address range
1757  * @size: size of address range
1758  *
1759  * Fault in a user address range for writing while checking for permissions at
1760  * sub-page granularity (e.g. arm64 MTE). This function should be used when
1761  * the caller cannot guarantee forward progress of a copy_to_user() loop.
1762  *
1763  * Returns the number of bytes not faulted in (like copy_to_user() and
1764  * copy_from_user()).
1765  */
1766 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
1767 {
1768 	size_t faulted_in;
1769 
1770 	/*
1771 	 * Attempt faulting in at page granularity first for page table
1772 	 * permission checking. The arch-specific probe_subpage_writeable()
1773 	 * functions may not check for this.
1774 	 */
1775 	faulted_in = size - fault_in_writeable(uaddr, size);
1776 	if (faulted_in)
1777 		faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
1778 
1779 	return size - faulted_in;
1780 }
1781 EXPORT_SYMBOL(fault_in_subpage_writeable);
1782 
1783 /*
1784  * fault_in_safe_writeable - fault in an address range for writing
1785  * @uaddr: start of address range
1786  * @size: length of address range
1787  *
1788  * Faults in an address range for writing.  This is primarily useful when we
1789  * already know that some or all of the pages in the address range aren't in
1790  * memory.
1791  *
1792  * Unlike fault_in_writeable(), this function is non-destructive.
1793  *
1794  * Note that we don't pin or otherwise hold the pages referenced that we fault
1795  * in.  There's no guarantee that they'll stay in memory for any duration of
1796  * time.
1797  *
1798  * Returns the number of bytes not faulted in, like copy_to_user() and
1799  * copy_from_user().
1800  */
1801 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1802 {
1803 	unsigned long start = (unsigned long)uaddr, end;
1804 	struct mm_struct *mm = current->mm;
1805 	bool unlocked = false;
1806 
1807 	if (unlikely(size == 0))
1808 		return 0;
1809 	end = PAGE_ALIGN(start + size);
1810 	if (end < start)
1811 		end = 0;
1812 
1813 	mmap_read_lock(mm);
1814 	do {
1815 		if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
1816 			break;
1817 		start = (start + PAGE_SIZE) & PAGE_MASK;
1818 	} while (start != end);
1819 	mmap_read_unlock(mm);
1820 
1821 	if (size > (unsigned long)uaddr - start)
1822 		return size - ((unsigned long)uaddr - start);
1823 	return 0;
1824 }
1825 EXPORT_SYMBOL(fault_in_safe_writeable);
1826 
1827 /**
1828  * fault_in_readable - fault in userspace address range for reading
1829  * @uaddr: start of user address range
1830  * @size: size of user address range
1831  *
1832  * Returns the number of bytes not faulted in (like copy_to_user() and
1833  * copy_from_user()).
1834  */
1835 size_t fault_in_readable(const char __user *uaddr, size_t size)
1836 {
1837 	const char __user *start = uaddr, *end;
1838 	volatile char c;
1839 
1840 	if (unlikely(size == 0))
1841 		return 0;
1842 	if (!user_read_access_begin(uaddr, size))
1843 		return size;
1844 	if (!PAGE_ALIGNED(uaddr)) {
1845 		unsafe_get_user(c, uaddr, out);
1846 		uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1847 	}
1848 	end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1849 	if (unlikely(end < start))
1850 		end = NULL;
1851 	while (uaddr != end) {
1852 		unsafe_get_user(c, uaddr, out);
1853 		uaddr += PAGE_SIZE;
1854 	}
1855 
1856 out:
1857 	user_read_access_end();
1858 	(void)c;
1859 	if (size > uaddr - start)
1860 		return size - (uaddr - start);
1861 	return 0;
1862 }
1863 EXPORT_SYMBOL(fault_in_readable);
1864 
1865 /**
1866  * get_dump_page() - pin user page in memory while writing it to core dump
1867  * @addr: user address
1868  *
1869  * Returns struct page pointer of user page pinned for dump,
1870  * to be freed afterwards by put_page().
1871  *
1872  * Returns NULL on any kind of failure - a hole must then be inserted into
1873  * the corefile, to preserve alignment with its headers; and also returns
1874  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1875  * allowing a hole to be left in the corefile to save disk space.
1876  *
1877  * Called without mmap_lock (takes and releases the mmap_lock by itself).
1878  */
1879 #ifdef CONFIG_ELF_CORE
1880 struct page *get_dump_page(unsigned long addr)
1881 {
1882 	struct page *page;
1883 	int locked = 0;
1884 	int ret;
1885 
1886 	ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
1887 				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1888 	return (ret == 1) ? page : NULL;
1889 }
1890 #endif /* CONFIG_ELF_CORE */
1891 
1892 #ifdef CONFIG_MIGRATION
1893 /*
1894  * Returns the number of collected pages. Return value is always >= 0.
1895  */
1896 static unsigned long collect_longterm_unpinnable_pages(
1897 					struct list_head *movable_page_list,
1898 					unsigned long nr_pages,
1899 					struct page **pages)
1900 {
1901 	unsigned long i, collected = 0;
1902 	struct folio *prev_folio = NULL;
1903 	bool drain_allow = true;
1904 
1905 	for (i = 0; i < nr_pages; i++) {
1906 		struct folio *folio = page_folio(pages[i]);
1907 
1908 		if (folio == prev_folio)
1909 			continue;
1910 		prev_folio = folio;
1911 
1912 		if (folio_is_longterm_pinnable(folio))
1913 			continue;
1914 
1915 		collected++;
1916 
1917 		if (folio_is_device_coherent(folio))
1918 			continue;
1919 
1920 		if (folio_test_hugetlb(folio)) {
1921 			isolate_hugetlb(folio, movable_page_list);
1922 			continue;
1923 		}
1924 
1925 		if (!folio_test_lru(folio) && drain_allow) {
1926 			lru_add_drain_all();
1927 			drain_allow = false;
1928 		}
1929 
1930 		if (!folio_isolate_lru(folio))
1931 			continue;
1932 
1933 		list_add_tail(&folio->lru, movable_page_list);
1934 		node_stat_mod_folio(folio,
1935 				    NR_ISOLATED_ANON + folio_is_file_lru(folio),
1936 				    folio_nr_pages(folio));
1937 	}
1938 
1939 	return collected;
1940 }
1941 
1942 /*
1943  * Unpins all pages and migrates device coherent pages and movable_page_list.
1944  * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
1945  * (or partial success).
1946  */
1947 static int migrate_longterm_unpinnable_pages(
1948 					struct list_head *movable_page_list,
1949 					unsigned long nr_pages,
1950 					struct page **pages)
1951 {
1952 	int ret;
1953 	unsigned long i;
1954 
1955 	for (i = 0; i < nr_pages; i++) {
1956 		struct folio *folio = page_folio(pages[i]);
1957 
1958 		if (folio_is_device_coherent(folio)) {
1959 			/*
1960 			 * Migration will fail if the page is pinned, so convert
1961 			 * the pin on the source page to a normal reference.
1962 			 */
1963 			pages[i] = NULL;
1964 			folio_get(folio);
1965 			gup_put_folio(folio, 1, FOLL_PIN);
1966 
1967 			if (migrate_device_coherent_page(&folio->page)) {
1968 				ret = -EBUSY;
1969 				goto err;
1970 			}
1971 
1972 			continue;
1973 		}
1974 
1975 		/*
1976 		 * We can't migrate pages with unexpected references, so drop
1977 		 * the reference obtained by __get_user_pages_locked().
1978 		 * Migrating pages have been added to movable_page_list after
1979 		 * calling folio_isolate_lru() which takes a reference so the
1980 		 * page won't be freed if it's migrating.
1981 		 */
1982 		unpin_user_page(pages[i]);
1983 		pages[i] = NULL;
1984 	}
1985 
1986 	if (!list_empty(movable_page_list)) {
1987 		struct migration_target_control mtc = {
1988 			.nid = NUMA_NO_NODE,
1989 			.gfp_mask = GFP_USER | __GFP_NOWARN,
1990 		};
1991 
1992 		if (migrate_pages(movable_page_list, alloc_migration_target,
1993 				  NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1994 				  MR_LONGTERM_PIN, NULL)) {
1995 			ret = -ENOMEM;
1996 			goto err;
1997 		}
1998 	}
1999 
2000 	putback_movable_pages(movable_page_list);
2001 
2002 	return -EAGAIN;
2003 
2004 err:
2005 	for (i = 0; i < nr_pages; i++)
2006 		if (pages[i])
2007 			unpin_user_page(pages[i]);
2008 	putback_movable_pages(movable_page_list);
2009 
2010 	return ret;
2011 }
2012 
2013 /*
2014  * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
2015  * pages in the range are required to be pinned via FOLL_PIN, before calling
2016  * this routine.
2017  *
2018  * If any pages in the range are not allowed to be pinned, then this routine
2019  * will migrate those pages away, unpin all the pages in the range and return
2020  * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
2021  * call this routine again.
2022  *
2023  * If an error other than -EAGAIN occurs, this indicates a migration failure.
2024  * The caller should give up, and propagate the error back up the call stack.
2025  *
2026  * If everything is OK and all pages in the range are allowed to be pinned, then
2027  * this routine leaves all pages pinned and returns zero for success.
2028  */
2029 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2030 					    struct page **pages)
2031 {
2032 	unsigned long collected;
2033 	LIST_HEAD(movable_page_list);
2034 
2035 	collected = collect_longterm_unpinnable_pages(&movable_page_list,
2036 						nr_pages, pages);
2037 	if (!collected)
2038 		return 0;
2039 
2040 	return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
2041 						pages);
2042 }
2043 #else
2044 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2045 					    struct page **pages)
2046 {
2047 	return 0;
2048 }
2049 #endif /* CONFIG_MIGRATION */
2050 
2051 /*
2052  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2053  * allows us to process the FOLL_LONGTERM flag.
2054  */
2055 static long __gup_longterm_locked(struct mm_struct *mm,
2056 				  unsigned long start,
2057 				  unsigned long nr_pages,
2058 				  struct page **pages,
2059 				  int *locked,
2060 				  unsigned int gup_flags)
2061 {
2062 	unsigned int flags;
2063 	long rc, nr_pinned_pages;
2064 
2065 	if (!(gup_flags & FOLL_LONGTERM))
2066 		return __get_user_pages_locked(mm, start, nr_pages, pages,
2067 					       locked, gup_flags);
2068 
2069 	flags = memalloc_pin_save();
2070 	do {
2071 		nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
2072 							  pages, locked,
2073 							  gup_flags);
2074 		if (nr_pinned_pages <= 0) {
2075 			rc = nr_pinned_pages;
2076 			break;
2077 		}
2078 
2079 		/* FOLL_LONGTERM implies FOLL_PIN */
2080 		rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
2081 	} while (rc == -EAGAIN);
2082 	memalloc_pin_restore(flags);
2083 	return rc ? rc : nr_pinned_pages;
2084 }
2085 
2086 /*
2087  * Check that the given flags are valid for the exported gup/pup interface, and
2088  * update them with the required flags that the caller must have set.
2089  */
2090 static bool is_valid_gup_args(struct page **pages, int *locked,
2091 			      unsigned int *gup_flags_p, unsigned int to_set)
2092 {
2093 	unsigned int gup_flags = *gup_flags_p;
2094 
2095 	/*
2096 	 * These flags not allowed to be specified externally to the gup
2097 	 * interfaces:
2098 	 * - FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
2099 	 * - FOLL_REMOTE is internal only and used on follow_page()
2100 	 * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
2101 	 */
2102 	if (WARN_ON_ONCE(gup_flags & (FOLL_PIN | FOLL_TRIED | FOLL_UNLOCKABLE |
2103 				      FOLL_REMOTE | FOLL_FAST_ONLY)))
2104 		return false;
2105 
2106 	gup_flags |= to_set;
2107 	if (locked) {
2108 		/* At the external interface locked must be set */
2109 		if (WARN_ON_ONCE(*locked != 1))
2110 			return false;
2111 
2112 		gup_flags |= FOLL_UNLOCKABLE;
2113 	}
2114 
2115 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2116 	if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
2117 			 (FOLL_PIN | FOLL_GET)))
2118 		return false;
2119 
2120 	/* LONGTERM can only be specified when pinning */
2121 	if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
2122 		return false;
2123 
2124 	/* Pages input must be given if using GET/PIN */
2125 	if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
2126 		return false;
2127 
2128 	/* We want to allow the pgmap to be hot-unplugged at all times */
2129 	if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
2130 			 (gup_flags & FOLL_PCI_P2PDMA)))
2131 		return false;
2132 
2133 	*gup_flags_p = gup_flags;
2134 	return true;
2135 }
2136 
2137 #ifdef CONFIG_MMU
2138 /**
2139  * get_user_pages_remote() - pin user pages in memory
2140  * @mm:		mm_struct of target mm
2141  * @start:	starting user address
2142  * @nr_pages:	number of pages from start to pin
2143  * @gup_flags:	flags modifying lookup behaviour
2144  * @pages:	array that receives pointers to the pages pinned.
2145  *		Should be at least nr_pages long. Or NULL, if caller
2146  *		only intends to ensure the pages are faulted in.
2147  * @locked:	pointer to lock flag indicating whether lock is held and
2148  *		subsequently whether VM_FAULT_RETRY functionality can be
2149  *		utilised. Lock must initially be held.
2150  *
2151  * Returns either number of pages pinned (which may be less than the
2152  * number requested), or an error. Details about the return value:
2153  *
2154  * -- If nr_pages is 0, returns 0.
2155  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2156  * -- If nr_pages is >0, and some pages were pinned, returns the number of
2157  *    pages pinned. Again, this may be less than nr_pages.
2158  *
2159  * The caller is responsible for releasing returned @pages, via put_page().
2160  *
2161  * Must be called with mmap_lock held for read or write.
2162  *
2163  * get_user_pages_remote walks a process's page tables and takes a reference
2164  * to each struct page that each user address corresponds to at a given
2165  * instant. That is, it takes the page that would be accessed if a user
2166  * thread accesses the given user virtual address at that instant.
2167  *
2168  * This does not guarantee that the page exists in the user mappings when
2169  * get_user_pages_remote returns, and there may even be a completely different
2170  * page there in some cases (eg. if mmapped pagecache has been invalidated
2171  * and subsequently re-faulted). However it does guarantee that the page
2172  * won't be freed completely. And mostly callers simply care that the page
2173  * contains data that was valid *at some point in time*. Typically, an IO
2174  * or similar operation cannot guarantee anything stronger anyway because
2175  * locks can't be held over the syscall boundary.
2176  *
2177  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2178  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2179  * be called after the page is finished with, and before put_page is called.
2180  *
2181  * get_user_pages_remote is typically used for fewer-copy IO operations,
2182  * to get a handle on the memory by some means other than accesses
2183  * via the user virtual addresses. The pages may be submitted for
2184  * DMA to devices or accessed via their kernel linear mapping (via the
2185  * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2186  *
2187  * See also get_user_pages_fast, for performance critical applications.
2188  *
2189  * get_user_pages_remote should be phased out in favor of
2190  * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2191  * should use get_user_pages_remote because it cannot pass
2192  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2193  */
2194 long get_user_pages_remote(struct mm_struct *mm,
2195 		unsigned long start, unsigned long nr_pages,
2196 		unsigned int gup_flags, struct page **pages,
2197 		int *locked)
2198 {
2199 	int local_locked = 1;
2200 
2201 	if (!is_valid_gup_args(pages, locked, &gup_flags,
2202 			       FOLL_TOUCH | FOLL_REMOTE))
2203 		return -EINVAL;
2204 
2205 	return __get_user_pages_locked(mm, start, nr_pages, pages,
2206 				       locked ? locked : &local_locked,
2207 				       gup_flags);
2208 }
2209 EXPORT_SYMBOL(get_user_pages_remote);
2210 
2211 #else /* CONFIG_MMU */
2212 long get_user_pages_remote(struct mm_struct *mm,
2213 			   unsigned long start, unsigned long nr_pages,
2214 			   unsigned int gup_flags, struct page **pages,
2215 			   int *locked)
2216 {
2217 	return 0;
2218 }
2219 #endif /* !CONFIG_MMU */
2220 
2221 /**
2222  * get_user_pages() - pin user pages in memory
2223  * @start:      starting user address
2224  * @nr_pages:   number of pages from start to pin
2225  * @gup_flags:  flags modifying lookup behaviour
2226  * @pages:      array that receives pointers to the pages pinned.
2227  *              Should be at least nr_pages long. Or NULL, if caller
2228  *              only intends to ensure the pages are faulted in.
2229  *
2230  * This is the same as get_user_pages_remote(), just with a less-flexible
2231  * calling convention where we assume that the mm being operated on belongs to
2232  * the current task, and doesn't allow passing of a locked parameter.  We also
2233  * obviously don't pass FOLL_REMOTE in here.
2234  */
2235 long get_user_pages(unsigned long start, unsigned long nr_pages,
2236 		    unsigned int gup_flags, struct page **pages)
2237 {
2238 	int locked = 1;
2239 
2240 	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
2241 		return -EINVAL;
2242 
2243 	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2244 				       &locked, gup_flags);
2245 }
2246 EXPORT_SYMBOL(get_user_pages);
2247 
2248 /*
2249  * get_user_pages_unlocked() is suitable to replace the form:
2250  *
2251  *      mmap_read_lock(mm);
2252  *      get_user_pages(mm, ..., pages, NULL);
2253  *      mmap_read_unlock(mm);
2254  *
2255  *  with:
2256  *
2257  *      get_user_pages_unlocked(mm, ..., pages);
2258  *
2259  * It is functionally equivalent to get_user_pages_fast so
2260  * get_user_pages_fast should be used instead if specific gup_flags
2261  * (e.g. FOLL_FORCE) are not required.
2262  */
2263 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2264 			     struct page **pages, unsigned int gup_flags)
2265 {
2266 	int locked = 0;
2267 
2268 	if (!is_valid_gup_args(pages, NULL, &gup_flags,
2269 			       FOLL_TOUCH | FOLL_UNLOCKABLE))
2270 		return -EINVAL;
2271 
2272 	return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2273 				       &locked, gup_flags);
2274 }
2275 EXPORT_SYMBOL(get_user_pages_unlocked);
2276 
2277 /*
2278  * Fast GUP
2279  *
2280  * get_user_pages_fast attempts to pin user pages by walking the page
2281  * tables directly and avoids taking locks. Thus the walker needs to be
2282  * protected from page table pages being freed from under it, and should
2283  * block any THP splits.
2284  *
2285  * One way to achieve this is to have the walker disable interrupts, and
2286  * rely on IPIs from the TLB flushing code blocking before the page table
2287  * pages are freed. This is unsuitable for architectures that do not need
2288  * to broadcast an IPI when invalidating TLBs.
2289  *
2290  * Another way to achieve this is to batch up page table containing pages
2291  * belonging to more than one mm_user, then rcu_sched a callback to free those
2292  * pages. Disabling interrupts will allow the fast_gup walker to both block
2293  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2294  * (which is a relatively rare event). The code below adopts this strategy.
2295  *
2296  * Before activating this code, please be aware that the following assumptions
2297  * are currently made:
2298  *
2299  *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2300  *  free pages containing page tables or TLB flushing requires IPI broadcast.
2301  *
2302  *  *) ptes can be read atomically by the architecture.
2303  *
2304  *  *) access_ok is sufficient to validate userspace address ranges.
2305  *
2306  * The last two assumptions can be relaxed by the addition of helper functions.
2307  *
2308  * This code is based heavily on the PowerPC implementation by Nick Piggin.
2309  */
2310 #ifdef CONFIG_HAVE_FAST_GUP
2311 
2312 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2313 					    unsigned int flags,
2314 					    struct page **pages)
2315 {
2316 	while ((*nr) - nr_start) {
2317 		struct page *page = pages[--(*nr)];
2318 
2319 		ClearPageReferenced(page);
2320 		if (flags & FOLL_PIN)
2321 			unpin_user_page(page);
2322 		else
2323 			put_page(page);
2324 	}
2325 }
2326 
2327 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2328 /*
2329  * Fast-gup relies on pte change detection to avoid concurrent pgtable
2330  * operations.
2331  *
2332  * To pin the page, fast-gup needs to do below in order:
2333  * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2334  *
2335  * For the rest of pgtable operations where pgtable updates can be racy
2336  * with fast-gup, we need to do (1) clear pte, then (2) check whether page
2337  * is pinned.
2338  *
2339  * Above will work for all pte-level operations, including THP split.
2340  *
2341  * For THP collapse, it's a bit more complicated because fast-gup may be
2342  * walking a pgtable page that is being freed (pte is still valid but pmd
2343  * can be cleared already).  To avoid race in such condition, we need to
2344  * also check pmd here to make sure pmd doesn't change (corresponds to
2345  * pmdp_collapse_flush() in the THP collapse code path).
2346  */
2347 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2348 			 unsigned long end, unsigned int flags,
2349 			 struct page **pages, int *nr)
2350 {
2351 	struct dev_pagemap *pgmap = NULL;
2352 	int nr_start = *nr, ret = 0;
2353 	pte_t *ptep, *ptem;
2354 
2355 	ptem = ptep = pte_offset_map(&pmd, addr);
2356 	do {
2357 		pte_t pte = ptep_get_lockless(ptep);
2358 		struct page *page;
2359 		struct folio *folio;
2360 
2361 		if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
2362 			goto pte_unmap;
2363 
2364 		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2365 			goto pte_unmap;
2366 
2367 		if (pte_devmap(pte)) {
2368 			if (unlikely(flags & FOLL_LONGTERM))
2369 				goto pte_unmap;
2370 
2371 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2372 			if (unlikely(!pgmap)) {
2373 				undo_dev_pagemap(nr, nr_start, flags, pages);
2374 				goto pte_unmap;
2375 			}
2376 		} else if (pte_special(pte))
2377 			goto pte_unmap;
2378 
2379 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2380 		page = pte_page(pte);
2381 
2382 		folio = try_grab_folio(page, 1, flags);
2383 		if (!folio)
2384 			goto pte_unmap;
2385 
2386 		if (unlikely(page_is_secretmem(page))) {
2387 			gup_put_folio(folio, 1, flags);
2388 			goto pte_unmap;
2389 		}
2390 
2391 		if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2392 		    unlikely(pte_val(pte) != pte_val(*ptep))) {
2393 			gup_put_folio(folio, 1, flags);
2394 			goto pte_unmap;
2395 		}
2396 
2397 		if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2398 			gup_put_folio(folio, 1, flags);
2399 			goto pte_unmap;
2400 		}
2401 
2402 		/*
2403 		 * We need to make the page accessible if and only if we are
2404 		 * going to access its content (the FOLL_PIN case).  Please
2405 		 * see Documentation/core-api/pin_user_pages.rst for
2406 		 * details.
2407 		 */
2408 		if (flags & FOLL_PIN) {
2409 			ret = arch_make_page_accessible(page);
2410 			if (ret) {
2411 				gup_put_folio(folio, 1, flags);
2412 				goto pte_unmap;
2413 			}
2414 		}
2415 		folio_set_referenced(folio);
2416 		pages[*nr] = page;
2417 		(*nr)++;
2418 	} while (ptep++, addr += PAGE_SIZE, addr != end);
2419 
2420 	ret = 1;
2421 
2422 pte_unmap:
2423 	if (pgmap)
2424 		put_dev_pagemap(pgmap);
2425 	pte_unmap(ptem);
2426 	return ret;
2427 }
2428 #else
2429 
2430 /*
2431  * If we can't determine whether or not a pte is special, then fail immediately
2432  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2433  * to be special.
2434  *
2435  * For a futex to be placed on a THP tail page, get_futex_key requires a
2436  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2437  * useful to have gup_huge_pmd even if we can't operate on ptes.
2438  */
2439 static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2440 			 unsigned long end, unsigned int flags,
2441 			 struct page **pages, int *nr)
2442 {
2443 	return 0;
2444 }
2445 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2446 
2447 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2448 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2449 			     unsigned long end, unsigned int flags,
2450 			     struct page **pages, int *nr)
2451 {
2452 	int nr_start = *nr;
2453 	struct dev_pagemap *pgmap = NULL;
2454 
2455 	do {
2456 		struct page *page = pfn_to_page(pfn);
2457 
2458 		pgmap = get_dev_pagemap(pfn, pgmap);
2459 		if (unlikely(!pgmap)) {
2460 			undo_dev_pagemap(nr, nr_start, flags, pages);
2461 			break;
2462 		}
2463 
2464 		if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
2465 			undo_dev_pagemap(nr, nr_start, flags, pages);
2466 			break;
2467 		}
2468 
2469 		SetPageReferenced(page);
2470 		pages[*nr] = page;
2471 		if (unlikely(try_grab_page(page, flags))) {
2472 			undo_dev_pagemap(nr, nr_start, flags, pages);
2473 			break;
2474 		}
2475 		(*nr)++;
2476 		pfn++;
2477 	} while (addr += PAGE_SIZE, addr != end);
2478 
2479 	put_dev_pagemap(pgmap);
2480 	return addr == end;
2481 }
2482 
2483 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2484 				 unsigned long end, unsigned int flags,
2485 				 struct page **pages, int *nr)
2486 {
2487 	unsigned long fault_pfn;
2488 	int nr_start = *nr;
2489 
2490 	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2491 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2492 		return 0;
2493 
2494 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2495 		undo_dev_pagemap(nr, nr_start, flags, pages);
2496 		return 0;
2497 	}
2498 	return 1;
2499 }
2500 
2501 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2502 				 unsigned long end, unsigned int flags,
2503 				 struct page **pages, int *nr)
2504 {
2505 	unsigned long fault_pfn;
2506 	int nr_start = *nr;
2507 
2508 	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2509 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2510 		return 0;
2511 
2512 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2513 		undo_dev_pagemap(nr, nr_start, flags, pages);
2514 		return 0;
2515 	}
2516 	return 1;
2517 }
2518 #else
2519 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2520 				 unsigned long end, unsigned int flags,
2521 				 struct page **pages, int *nr)
2522 {
2523 	BUILD_BUG();
2524 	return 0;
2525 }
2526 
2527 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2528 				 unsigned long end, unsigned int flags,
2529 				 struct page **pages, int *nr)
2530 {
2531 	BUILD_BUG();
2532 	return 0;
2533 }
2534 #endif
2535 
2536 static int record_subpages(struct page *page, unsigned long addr,
2537 			   unsigned long end, struct page **pages)
2538 {
2539 	int nr;
2540 
2541 	for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
2542 		pages[nr] = nth_page(page, nr);
2543 
2544 	return nr;
2545 }
2546 
2547 #ifdef CONFIG_ARCH_HAS_HUGEPD
2548 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2549 				      unsigned long sz)
2550 {
2551 	unsigned long __boundary = (addr + sz) & ~(sz-1);
2552 	return (__boundary - 1 < end - 1) ? __boundary : end;
2553 }
2554 
2555 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2556 		       unsigned long end, unsigned int flags,
2557 		       struct page **pages, int *nr)
2558 {
2559 	unsigned long pte_end;
2560 	struct page *page;
2561 	struct folio *folio;
2562 	pte_t pte;
2563 	int refs;
2564 
2565 	pte_end = (addr + sz) & ~(sz-1);
2566 	if (pte_end < end)
2567 		end = pte_end;
2568 
2569 	pte = huge_ptep_get(ptep);
2570 
2571 	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2572 		return 0;
2573 
2574 	/* hugepages are never "special" */
2575 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2576 
2577 	page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
2578 	refs = record_subpages(page, addr, end, pages + *nr);
2579 
2580 	folio = try_grab_folio(page, refs, flags);
2581 	if (!folio)
2582 		return 0;
2583 
2584 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2585 		gup_put_folio(folio, refs, flags);
2586 		return 0;
2587 	}
2588 
2589 	if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
2590 		gup_put_folio(folio, refs, flags);
2591 		return 0;
2592 	}
2593 
2594 	*nr += refs;
2595 	folio_set_referenced(folio);
2596 	return 1;
2597 }
2598 
2599 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2600 		unsigned int pdshift, unsigned long end, unsigned int flags,
2601 		struct page **pages, int *nr)
2602 {
2603 	pte_t *ptep;
2604 	unsigned long sz = 1UL << hugepd_shift(hugepd);
2605 	unsigned long next;
2606 
2607 	ptep = hugepte_offset(hugepd, addr, pdshift);
2608 	do {
2609 		next = hugepte_addr_end(addr, end, sz);
2610 		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2611 			return 0;
2612 	} while (ptep++, addr = next, addr != end);
2613 
2614 	return 1;
2615 }
2616 #else
2617 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2618 		unsigned int pdshift, unsigned long end, unsigned int flags,
2619 		struct page **pages, int *nr)
2620 {
2621 	return 0;
2622 }
2623 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2624 
2625 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2626 			unsigned long end, unsigned int flags,
2627 			struct page **pages, int *nr)
2628 {
2629 	struct page *page;
2630 	struct folio *folio;
2631 	int refs;
2632 
2633 	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2634 		return 0;
2635 
2636 	if (pmd_devmap(orig)) {
2637 		if (unlikely(flags & FOLL_LONGTERM))
2638 			return 0;
2639 		return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2640 					     pages, nr);
2641 	}
2642 
2643 	page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
2644 	refs = record_subpages(page, addr, end, pages + *nr);
2645 
2646 	folio = try_grab_folio(page, refs, flags);
2647 	if (!folio)
2648 		return 0;
2649 
2650 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2651 		gup_put_folio(folio, refs, flags);
2652 		return 0;
2653 	}
2654 
2655 	if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2656 		gup_put_folio(folio, refs, flags);
2657 		return 0;
2658 	}
2659 
2660 	*nr += refs;
2661 	folio_set_referenced(folio);
2662 	return 1;
2663 }
2664 
2665 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2666 			unsigned long end, unsigned int flags,
2667 			struct page **pages, int *nr)
2668 {
2669 	struct page *page;
2670 	struct folio *folio;
2671 	int refs;
2672 
2673 	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2674 		return 0;
2675 
2676 	if (pud_devmap(orig)) {
2677 		if (unlikely(flags & FOLL_LONGTERM))
2678 			return 0;
2679 		return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2680 					     pages, nr);
2681 	}
2682 
2683 	page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
2684 	refs = record_subpages(page, addr, end, pages + *nr);
2685 
2686 	folio = try_grab_folio(page, refs, flags);
2687 	if (!folio)
2688 		return 0;
2689 
2690 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2691 		gup_put_folio(folio, refs, flags);
2692 		return 0;
2693 	}
2694 
2695 	if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2696 		gup_put_folio(folio, refs, flags);
2697 		return 0;
2698 	}
2699 
2700 	*nr += refs;
2701 	folio_set_referenced(folio);
2702 	return 1;
2703 }
2704 
2705 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2706 			unsigned long end, unsigned int flags,
2707 			struct page **pages, int *nr)
2708 {
2709 	int refs;
2710 	struct page *page;
2711 	struct folio *folio;
2712 
2713 	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2714 		return 0;
2715 
2716 	BUILD_BUG_ON(pgd_devmap(orig));
2717 
2718 	page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2719 	refs = record_subpages(page, addr, end, pages + *nr);
2720 
2721 	folio = try_grab_folio(page, refs, flags);
2722 	if (!folio)
2723 		return 0;
2724 
2725 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2726 		gup_put_folio(folio, refs, flags);
2727 		return 0;
2728 	}
2729 
2730 	if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2731 		gup_put_folio(folio, refs, flags);
2732 		return 0;
2733 	}
2734 
2735 	*nr += refs;
2736 	folio_set_referenced(folio);
2737 	return 1;
2738 }
2739 
2740 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2741 		unsigned int flags, struct page **pages, int *nr)
2742 {
2743 	unsigned long next;
2744 	pmd_t *pmdp;
2745 
2746 	pmdp = pmd_offset_lockless(pudp, pud, addr);
2747 	do {
2748 		pmd_t pmd = pmdp_get_lockless(pmdp);
2749 
2750 		next = pmd_addr_end(addr, end);
2751 		if (!pmd_present(pmd))
2752 			return 0;
2753 
2754 		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2755 			     pmd_devmap(pmd))) {
2756 			if (pmd_protnone(pmd) &&
2757 			    !gup_can_follow_protnone(flags))
2758 				return 0;
2759 
2760 			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2761 				pages, nr))
2762 				return 0;
2763 
2764 		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2765 			/*
2766 			 * architecture have different format for hugetlbfs
2767 			 * pmd format and THP pmd format
2768 			 */
2769 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2770 					 PMD_SHIFT, next, flags, pages, nr))
2771 				return 0;
2772 		} else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
2773 			return 0;
2774 	} while (pmdp++, addr = next, addr != end);
2775 
2776 	return 1;
2777 }
2778 
2779 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2780 			 unsigned int flags, struct page **pages, int *nr)
2781 {
2782 	unsigned long next;
2783 	pud_t *pudp;
2784 
2785 	pudp = pud_offset_lockless(p4dp, p4d, addr);
2786 	do {
2787 		pud_t pud = READ_ONCE(*pudp);
2788 
2789 		next = pud_addr_end(addr, end);
2790 		if (unlikely(!pud_present(pud)))
2791 			return 0;
2792 		if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
2793 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
2794 					  pages, nr))
2795 				return 0;
2796 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2797 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2798 					 PUD_SHIFT, next, flags, pages, nr))
2799 				return 0;
2800 		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2801 			return 0;
2802 	} while (pudp++, addr = next, addr != end);
2803 
2804 	return 1;
2805 }
2806 
2807 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2808 			 unsigned int flags, struct page **pages, int *nr)
2809 {
2810 	unsigned long next;
2811 	p4d_t *p4dp;
2812 
2813 	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2814 	do {
2815 		p4d_t p4d = READ_ONCE(*p4dp);
2816 
2817 		next = p4d_addr_end(addr, end);
2818 		if (p4d_none(p4d))
2819 			return 0;
2820 		BUILD_BUG_ON(p4d_huge(p4d));
2821 		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2822 			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2823 					 P4D_SHIFT, next, flags, pages, nr))
2824 				return 0;
2825 		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2826 			return 0;
2827 	} while (p4dp++, addr = next, addr != end);
2828 
2829 	return 1;
2830 }
2831 
2832 static void gup_pgd_range(unsigned long addr, unsigned long end,
2833 		unsigned int flags, struct page **pages, int *nr)
2834 {
2835 	unsigned long next;
2836 	pgd_t *pgdp;
2837 
2838 	pgdp = pgd_offset(current->mm, addr);
2839 	do {
2840 		pgd_t pgd = READ_ONCE(*pgdp);
2841 
2842 		next = pgd_addr_end(addr, end);
2843 		if (pgd_none(pgd))
2844 			return;
2845 		if (unlikely(pgd_huge(pgd))) {
2846 			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2847 					  pages, nr))
2848 				return;
2849 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2850 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2851 					 PGDIR_SHIFT, next, flags, pages, nr))
2852 				return;
2853 		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2854 			return;
2855 	} while (pgdp++, addr = next, addr != end);
2856 }
2857 #else
2858 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2859 		unsigned int flags, struct page **pages, int *nr)
2860 {
2861 }
2862 #endif /* CONFIG_HAVE_FAST_GUP */
2863 
2864 #ifndef gup_fast_permitted
2865 /*
2866  * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2867  * we need to fall back to the slow version:
2868  */
2869 static bool gup_fast_permitted(unsigned long start, unsigned long end)
2870 {
2871 	return true;
2872 }
2873 #endif
2874 
2875 static unsigned long lockless_pages_from_mm(unsigned long start,
2876 					    unsigned long end,
2877 					    unsigned int gup_flags,
2878 					    struct page **pages)
2879 {
2880 	unsigned long flags;
2881 	int nr_pinned = 0;
2882 	unsigned seq;
2883 
2884 	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2885 	    !gup_fast_permitted(start, end))
2886 		return 0;
2887 
2888 	if (gup_flags & FOLL_PIN) {
2889 		seq = raw_read_seqcount(&current->mm->write_protect_seq);
2890 		if (seq & 1)
2891 			return 0;
2892 	}
2893 
2894 	/*
2895 	 * Disable interrupts. The nested form is used, in order to allow full,
2896 	 * general purpose use of this routine.
2897 	 *
2898 	 * With interrupts disabled, we block page table pages from being freed
2899 	 * from under us. See struct mmu_table_batch comments in
2900 	 * include/asm-generic/tlb.h for more details.
2901 	 *
2902 	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2903 	 * that come from THPs splitting.
2904 	 */
2905 	local_irq_save(flags);
2906 	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2907 	local_irq_restore(flags);
2908 
2909 	/*
2910 	 * When pinning pages for DMA there could be a concurrent write protect
2911 	 * from fork() via copy_page_range(), in this case always fail fast GUP.
2912 	 */
2913 	if (gup_flags & FOLL_PIN) {
2914 		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2915 			unpin_user_pages_lockless(pages, nr_pinned);
2916 			return 0;
2917 		} else {
2918 			sanity_check_pinned_pages(pages, nr_pinned);
2919 		}
2920 	}
2921 	return nr_pinned;
2922 }
2923 
2924 static int internal_get_user_pages_fast(unsigned long start,
2925 					unsigned long nr_pages,
2926 					unsigned int gup_flags,
2927 					struct page **pages)
2928 {
2929 	unsigned long len, end;
2930 	unsigned long nr_pinned;
2931 	int locked = 0;
2932 	int ret;
2933 
2934 	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2935 				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
2936 				       FOLL_FAST_ONLY | FOLL_NOFAULT |
2937 				       FOLL_PCI_P2PDMA)))
2938 		return -EINVAL;
2939 
2940 	if (gup_flags & FOLL_PIN)
2941 		mm_set_has_pinned_flag(&current->mm->flags);
2942 
2943 	if (!(gup_flags & FOLL_FAST_ONLY))
2944 		might_lock_read(&current->mm->mmap_lock);
2945 
2946 	start = untagged_addr(start) & PAGE_MASK;
2947 	len = nr_pages << PAGE_SHIFT;
2948 	if (check_add_overflow(start, len, &end))
2949 		return 0;
2950 	if (end > TASK_SIZE_MAX)
2951 		return -EFAULT;
2952 	if (unlikely(!access_ok((void __user *)start, len)))
2953 		return -EFAULT;
2954 
2955 	nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2956 	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2957 		return nr_pinned;
2958 
2959 	/* Slow path: try to get the remaining pages with get_user_pages */
2960 	start += nr_pinned << PAGE_SHIFT;
2961 	pages += nr_pinned;
2962 	ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
2963 				    pages, &locked,
2964 				    gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
2965 	if (ret < 0) {
2966 		/*
2967 		 * The caller has to unpin the pages we already pinned so
2968 		 * returning -errno is not an option
2969 		 */
2970 		if (nr_pinned)
2971 			return nr_pinned;
2972 		return ret;
2973 	}
2974 	return ret + nr_pinned;
2975 }
2976 
2977 /**
2978  * get_user_pages_fast_only() - pin user pages in memory
2979  * @start:      starting user address
2980  * @nr_pages:   number of pages from start to pin
2981  * @gup_flags:  flags modifying pin behaviour
2982  * @pages:      array that receives pointers to the pages pinned.
2983  *              Should be at least nr_pages long.
2984  *
2985  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2986  * the regular GUP.
2987  *
2988  * If the architecture does not support this function, simply return with no
2989  * pages pinned.
2990  *
2991  * Careful, careful! COW breaking can go either way, so a non-write
2992  * access can get ambiguous page results. If you call this function without
2993  * 'write' set, you'd better be sure that you're ok with that ambiguity.
2994  */
2995 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2996 			     unsigned int gup_flags, struct page **pages)
2997 {
2998 	/*
2999 	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3000 	 * because gup fast is always a "pin with a +1 page refcount" request.
3001 	 *
3002 	 * FOLL_FAST_ONLY is required in order to match the API description of
3003 	 * this routine: no fall back to regular ("slow") GUP.
3004 	 */
3005 	if (!is_valid_gup_args(pages, NULL, &gup_flags,
3006 			       FOLL_GET | FOLL_FAST_ONLY))
3007 		return -EINVAL;
3008 
3009 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3010 }
3011 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
3012 
3013 /**
3014  * get_user_pages_fast() - pin user pages in memory
3015  * @start:      starting user address
3016  * @nr_pages:   number of pages from start to pin
3017  * @gup_flags:  flags modifying pin behaviour
3018  * @pages:      array that receives pointers to the pages pinned.
3019  *              Should be at least nr_pages long.
3020  *
3021  * Attempt to pin user pages in memory without taking mm->mmap_lock.
3022  * If not successful, it will fall back to taking the lock and
3023  * calling get_user_pages().
3024  *
3025  * Returns number of pages pinned. This may be fewer than the number requested.
3026  * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3027  * -errno.
3028  */
3029 int get_user_pages_fast(unsigned long start, int nr_pages,
3030 			unsigned int gup_flags, struct page **pages)
3031 {
3032 	/*
3033 	 * The caller may or may not have explicitly set FOLL_GET; either way is
3034 	 * OK. However, internally (within mm/gup.c), gup fast variants must set
3035 	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3036 	 * request.
3037 	 */
3038 	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
3039 		return -EINVAL;
3040 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3041 }
3042 EXPORT_SYMBOL_GPL(get_user_pages_fast);
3043 
3044 /**
3045  * pin_user_pages_fast() - pin user pages in memory without taking locks
3046  *
3047  * @start:      starting user address
3048  * @nr_pages:   number of pages from start to pin
3049  * @gup_flags:  flags modifying pin behaviour
3050  * @pages:      array that receives pointers to the pages pinned.
3051  *              Should be at least nr_pages long.
3052  *
3053  * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3054  * get_user_pages_fast() for documentation on the function arguments, because
3055  * the arguments here are identical.
3056  *
3057  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3058  * see Documentation/core-api/pin_user_pages.rst for further details.
3059  */
3060 int pin_user_pages_fast(unsigned long start, int nr_pages,
3061 			unsigned int gup_flags, struct page **pages)
3062 {
3063 	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3064 		return -EINVAL;
3065 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3066 }
3067 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
3068 
3069 /**
3070  * pin_user_pages_remote() - pin pages of a remote process
3071  *
3072  * @mm:		mm_struct of target mm
3073  * @start:	starting user address
3074  * @nr_pages:	number of pages from start to pin
3075  * @gup_flags:	flags modifying lookup behaviour
3076  * @pages:	array that receives pointers to the pages pinned.
3077  *		Should be at least nr_pages long.
3078  * @locked:	pointer to lock flag indicating whether lock is held and
3079  *		subsequently whether VM_FAULT_RETRY functionality can be
3080  *		utilised. Lock must initially be held.
3081  *
3082  * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3083  * get_user_pages_remote() for documentation on the function arguments, because
3084  * the arguments here are identical.
3085  *
3086  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3087  * see Documentation/core-api/pin_user_pages.rst for details.
3088  */
3089 long pin_user_pages_remote(struct mm_struct *mm,
3090 			   unsigned long start, unsigned long nr_pages,
3091 			   unsigned int gup_flags, struct page **pages,
3092 			   int *locked)
3093 {
3094 	int local_locked = 1;
3095 
3096 	if (!is_valid_gup_args(pages, locked, &gup_flags,
3097 			       FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
3098 		return 0;
3099 	return __gup_longterm_locked(mm, start, nr_pages, pages,
3100 				     locked ? locked : &local_locked,
3101 				     gup_flags);
3102 }
3103 EXPORT_SYMBOL(pin_user_pages_remote);
3104 
3105 /**
3106  * pin_user_pages() - pin user pages in memory for use by other devices
3107  *
3108  * @start:	starting user address
3109  * @nr_pages:	number of pages from start to pin
3110  * @gup_flags:	flags modifying lookup behaviour
3111  * @pages:	array that receives pointers to the pages pinned.
3112  *		Should be at least nr_pages long.
3113  *
3114  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3115  * FOLL_PIN is set.
3116  *
3117  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3118  * see Documentation/core-api/pin_user_pages.rst for details.
3119  */
3120 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3121 		    unsigned int gup_flags, struct page **pages)
3122 {
3123 	int locked = 1;
3124 
3125 	if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3126 		return 0;
3127 	return __gup_longterm_locked(current->mm, start, nr_pages,
3128 				     pages, &locked, gup_flags);
3129 }
3130 EXPORT_SYMBOL(pin_user_pages);
3131 
3132 /*
3133  * pin_user_pages_unlocked() is the FOLL_PIN variant of
3134  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3135  * FOLL_PIN and rejects FOLL_GET.
3136  */
3137 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3138 			     struct page **pages, unsigned int gup_flags)
3139 {
3140 	int locked = 0;
3141 
3142 	if (!is_valid_gup_args(pages, NULL, &gup_flags,
3143 			       FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
3144 		return 0;
3145 
3146 	return __gup_longterm_locked(current->mm, start, nr_pages, pages,
3147 				     &locked, gup_flags);
3148 }
3149 EXPORT_SYMBOL(pin_user_pages_unlocked);
3150