xref: /linux/mm/gup.c (revision 94e48d6aafef23143f92eadd010c505c49487576)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/err.h>
5 #include <linux/spinlock.h>
6 
7 #include <linux/mm.h>
8 #include <linux/memremap.h>
9 #include <linux/pagemap.h>
10 #include <linux/rmap.h>
11 #include <linux/swap.h>
12 #include <linux/swapops.h>
13 
14 #include <linux/sched/signal.h>
15 #include <linux/rwsem.h>
16 #include <linux/hugetlb.h>
17 #include <linux/migrate.h>
18 #include <linux/mm_inline.h>
19 #include <linux/sched/mm.h>
20 
21 #include <asm/mmu_context.h>
22 #include <asm/tlbflush.h>
23 
24 #include "internal.h"
25 
26 struct follow_page_context {
27 	struct dev_pagemap *pgmap;
28 	unsigned int page_mask;
29 };
30 
31 static void hpage_pincount_add(struct page *page, int refs)
32 {
33 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
34 	VM_BUG_ON_PAGE(page != compound_head(page), page);
35 
36 	atomic_add(refs, compound_pincount_ptr(page));
37 }
38 
39 static void hpage_pincount_sub(struct page *page, int refs)
40 {
41 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
42 	VM_BUG_ON_PAGE(page != compound_head(page), page);
43 
44 	atomic_sub(refs, compound_pincount_ptr(page));
45 }
46 
47 /* Equivalent to calling put_page() @refs times. */
48 static void put_page_refs(struct page *page, int refs)
49 {
50 #ifdef CONFIG_DEBUG_VM
51 	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
52 		return;
53 #endif
54 
55 	/*
56 	 * Calling put_page() for each ref is unnecessarily slow. Only the last
57 	 * ref needs a put_page().
58 	 */
59 	if (refs > 1)
60 		page_ref_sub(page, refs - 1);
61 	put_page(page);
62 }
63 
64 /*
65  * Return the compound head page with ref appropriately incremented,
66  * or NULL if that failed.
67  */
68 static inline struct page *try_get_compound_head(struct page *page, int refs)
69 {
70 	struct page *head = compound_head(page);
71 
72 	if (WARN_ON_ONCE(page_ref_count(head) < 0))
73 		return NULL;
74 	if (unlikely(!page_cache_add_speculative(head, refs)))
75 		return NULL;
76 
77 	/*
78 	 * At this point we have a stable reference to the head page; but it
79 	 * could be that between the compound_head() lookup and the refcount
80 	 * increment, the compound page was split, in which case we'd end up
81 	 * holding a reference on a page that has nothing to do with the page
82 	 * we were given anymore.
83 	 * So now that the head page is stable, recheck that the pages still
84 	 * belong together.
85 	 */
86 	if (unlikely(compound_head(page) != head)) {
87 		put_page_refs(head, refs);
88 		return NULL;
89 	}
90 
91 	return head;
92 }
93 
94 /*
95  * try_grab_compound_head() - attempt to elevate a page's refcount, by a
96  * flags-dependent amount.
97  *
98  * "grab" names in this file mean, "look at flags to decide whether to use
99  * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
100  *
101  * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
102  * same time. (That's true throughout the get_user_pages*() and
103  * pin_user_pages*() APIs.) Cases:
104  *
105  *    FOLL_GET: page's refcount will be incremented by 1.
106  *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
107  *
108  * Return: head page (with refcount appropriately incremented) for success, or
109  * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
110  * considered failure, and furthermore, a likely bug in the caller, so a warning
111  * is also emitted.
112  */
113 __maybe_unused struct page *try_grab_compound_head(struct page *page,
114 						   int refs, unsigned int flags)
115 {
116 	if (flags & FOLL_GET)
117 		return try_get_compound_head(page, refs);
118 	else if (flags & FOLL_PIN) {
119 		int orig_refs = refs;
120 
121 		/*
122 		 * Can't take the FOLL_LONGTERM + FOLL_PIN gup fast path if the
123 		 * page is not in a pinnable zone, so fail and let the caller
124 		 * fall back to the slow path.
125 		 */
126 		if (unlikely((flags & FOLL_LONGTERM) &&
127 			     !is_pinnable_page(page)))
128 			return NULL;
129 
130 		/*
131 		 * CAUTION: Don't use compound_head() on the page before this
132 		 * point, the result won't be stable.
133 		 */
134 		page = try_get_compound_head(page, refs);
135 		if (!page)
136 			return NULL;
137 
138 		/*
139 		 * When pinning a compound page of order > 1 (which is what
140 		 * hpage_pincount_available() checks for), use an exact count to
141 		 * track it, via hpage_pincount_add/_sub().
142 		 *
143 		 * However, be sure to *also* increment the normal page refcount
144 		 * field at least once, so that the page really is pinned.
145 		 */
146 		if (hpage_pincount_available(page))
147 			hpage_pincount_add(page, refs);
148 		else
149 			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
150 
151 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
152 				    orig_refs);
153 
154 		return page;
155 	}
156 
157 	WARN_ON_ONCE(1);
158 	return NULL;
159 }
160 
161 static void put_compound_head(struct page *page, int refs, unsigned int flags)
162 {
163 	if (flags & FOLL_PIN) {
164 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
165 				    refs);
166 
167 		if (hpage_pincount_available(page))
168 			hpage_pincount_sub(page, refs);
169 		else
170 			refs *= GUP_PIN_COUNTING_BIAS;
171 	}
172 
173 	put_page_refs(page, refs);
174 }
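
/*
 * Editorial note, not part of the original file: a worked example of the two
 * pin-counting schemes implemented above, assuming GUP_PIN_COUNTING_BIAS is
 * 1024 (its value in include/linux/mm.h at this revision).
 *
 *	order-0 page, one FOLL_PIN:
 *		try_grab_page()			refcount += 1024
 *		unpin_user_page()		refcount -= 1024
 *
 *	compound page with hpage_pincount_available(), pinned with refs = n:
 *		try_grab_compound_head()	refcount += n, compound_pincount += n
 *		put_compound_head()		refcount -= n, compound_pincount -= n
 *
 * Either way the normal refcount moves by at least one per pin, so a pinned
 * page cannot be freed, and page_maybe_dma_pinned() can use whichever counter
 * applies to report that a page is (probably) DMA-pinned.
 */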
175 
176 /**
177  * try_grab_page() - elevate a page's refcount by a flag-dependent amount
178  *
179  * This might not do anything at all, depending on the flags argument.
180  *
181  * "grab" names in this file mean, "look at flags to decide whether to use
182  * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
183  *
184  * @page:    pointer to page to be grabbed
185  * @flags:   gup flags: these are the FOLL_* flag values.
186  *
187  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
188  * time. Cases:
189  *
190  *    FOLL_GET: page's refcount will be incremented by 1.
191  *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
192  *
193  * Return: true for success, or if no action was required (if neither FOLL_PIN
194  * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
195  * FOLL_PIN was set, but the page could not be grabbed.
196  */
197 bool __must_check try_grab_page(struct page *page, unsigned int flags)
198 {
199 	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
200 
201 	if (flags & FOLL_GET)
202 		return try_get_page(page);
203 	else if (flags & FOLL_PIN) {
204 		int refs = 1;
205 
206 		page = compound_head(page);
207 
208 		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
209 			return false;
210 
211 		if (hpage_pincount_available(page))
212 			hpage_pincount_add(page, 1);
213 		else
214 			refs = GUP_PIN_COUNTING_BIAS;
215 
216 		/*
217 		 * Similar to try_grab_compound_head(): even if using the
218 		 * hpage_pincount_add/_sub() routines, be sure to
219 		 * *also* increment the normal page refcount field at least
220 		 * once, so that the page really is pinned.
221 		 */
222 		page_ref_add(page, refs);
223 
224 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
225 	}
226 
227 	return true;
228 }
229 
230 /**
231  * unpin_user_page() - release a dma-pinned page
232  * @page:            pointer to page to be released
233  *
234  * Pages that were pinned via pin_user_pages*() must be released via either
235  * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
236  * that such pages can be separately tracked and uniquely handled. In
237  * particular, interactions with RDMA and filesystems need special handling.
238  */
239 void unpin_user_page(struct page *page)
240 {
241 	put_compound_head(compound_head(page), 1, FOLL_PIN);
242 }
243 EXPORT_SYMBOL(unpin_user_page);
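
/*
 * Editorial example, not part of the original file: a minimal sketch of the
 * pin/unpin pairing described above, as seen from a driver that makes a user
 * buffer available for DMA. Variable names and the DMA helper are
 * illustrative only.
 *
 *	struct page *pages[16];
 *	long i, pinned;
 *
 *	pinned = pin_user_pages(user_addr, 16, FOLL_WRITE, pages, NULL);
 *	if (pinned <= 0)
 *		return pinned ? pinned : -EFAULT;
 *
 *	do_device_dma(pages, pinned);		// hypothetical helper
 *
 *	for (i = 0; i < pinned; i++)
 *		unpin_user_page(pages[i]);	// never put_page() for pins
 *
 * unpin_user_pages(pages, pinned) is the batched equivalent of the loop.
 */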
244 
245 static inline void compound_range_next(unsigned long i, unsigned long npages,
246 				       struct page **list, struct page **head,
247 				       unsigned int *ntails)
248 {
249 	struct page *next, *page;
250 	unsigned int nr = 1;
251 
252 	if (i >= npages)
253 		return;
254 
255 	next = *list + i;
256 	page = compound_head(next);
257 	if (PageCompound(page) && compound_order(page) >= 1)
258 		nr = min_t(unsigned int,
259 			   page + compound_nr(page) - next, npages - i);
260 
261 	*head = page;
262 	*ntails = nr;
263 }
264 
265 #define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
266 	for (__i = 0, \
267 	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
268 	     __i < __npages; __i += __ntails, \
269 	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))
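
/*
 * Editorial note, not part of the original file: compound_range_next() lets
 * the for_each_compound_range() loop above hand back one head page plus a
 * tail count instead of visiting every page. For example, with 4K base pages
 * a 2MB THP has compound_nr() == 512, so a range starting at its head page
 * with npages >= 512 is consumed in a single iteration with ntails == 512,
 * and put_compound_head() is then called once rather than 512 times.
 */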
270 
271 static inline void compound_next(unsigned long i, unsigned long npages,
272 				 struct page **list, struct page **head,
273 				 unsigned int *ntails)
274 {
275 	struct page *page;
276 	unsigned int nr;
277 
278 	if (i >= npages)
279 		return;
280 
281 	page = compound_head(list[i]);
282 	for (nr = i + 1; nr < npages; nr++) {
283 		if (compound_head(list[nr]) != page)
284 			break;
285 	}
286 
287 	*head = page;
288 	*ntails = nr - i;
289 }
290 
291 #define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
292 	for (__i = 0, \
293 	     compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
294 	     __i < __npages; __i += __ntails, \
295 	     compound_next(__i, __npages, __list, &(__head), &(__ntails)))
296 
297 /**
298  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
299  * @pages:  array of pages to be maybe marked dirty, and definitely released.
300  * @npages: number of pages in the @pages array.
301  * @make_dirty: whether to mark the pages dirty
302  *
303  * "gup-pinned page" refers to a page that has had one of the pin_user_pages()
304  * variants called on that page.
305  *
306  * For each page in the @pages array, make that page (or its head page, if a
307  * compound page) dirty, if @make_dirty is true, and if the page was previously
308  * listed as clean. In any case, releases all pages using unpin_user_page(),
309  * possibly via unpin_user_pages(), for the non-dirty case.
310  *
311  * Please see the unpin_user_page() documentation for details.
312  *
313  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
314  * required, then the caller should a) verify that this is really correct,
315  * because _lock() is usually required, and b) hand code it:
316  * set_page_dirty(), unpin_user_page().
317  *
318  */
319 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
320 				 bool make_dirty)
321 {
322 	unsigned long index;
323 	struct page *head;
324 	unsigned int ntails;
325 
326 	if (!make_dirty) {
327 		unpin_user_pages(pages, npages);
328 		return;
329 	}
330 
331 	for_each_compound_head(index, pages, npages, head, ntails) {
332 		/*
333 		 * Checking PageDirty at this point may race with
334 		 * clear_page_dirty_for_io(), but that's OK. Two key
335 		 * cases:
336 		 *
337 		 * 1) This code sees the page as already dirty, so it
338 		 * skips the call to set_page_dirty(). That could happen
339 		 * because clear_page_dirty_for_io() called
340 		 * page_mkclean(), followed by set_page_dirty().
341 		 * However, now the page is going to get written back,
342 		 * which meets the original intention of setting it
343 		 * dirty, so all is well: clear_page_dirty_for_io() goes
344 		 * on to call TestClearPageDirty(), and write the page
345 		 * back.
346 		 *
347 		 * 2) This code sees the page as clean, so it calls
348 		 * set_page_dirty(). The page stays dirty, despite being
349 		 * written back, so it gets written back again in the
350 		 * next writeback cycle. This is harmless.
351 		 */
352 		if (!PageDirty(head))
353 			set_page_dirty_lock(head);
354 		put_compound_head(head, ntails, FOLL_PIN);
355 	}
356 }
357 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
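
/*
 * Editorial example, not part of the original file: a sketch of the typical
 * unpin_user_pages_dirty_lock() caller, where a device has written into the
 * pinned user buffer (direct-IO-read style). The DMA helper is illustrative.
 *
 *	pinned = pin_user_pages(user_addr, nr, FOLL_WRITE, pages, NULL);
 *	if (pinned > 0) {
 *		dma_fill_user_buffer(pages, pinned);	// hypothetical
 *		// the device dirtied the memory, so dirty + unpin together:
 *		unpin_user_pages_dirty_lock(pages, pinned, true);
 *	}
 *
 * With make_dirty == false this degenerates to plain unpin_user_pages().
 */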
358 
359 /**
360  * unpin_user_page_range_dirty_lock() - release and optionally dirty
361  * gup-pinned page range
362  *
363  * @page:  the starting page of a range maybe marked dirty, and definitely released.
364  * @npages: number of consecutive pages to release.
365  * @make_dirty: whether to mark the pages dirty
366  *
367  * "gup-pinned page range" refers to a range of pages that has had one of the
368  * pin_user_pages() variants called on that page.
369  *
370  * For the page range defined by [page .. page+npages-1], make that range (or
371  * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
372  * page range was previously listed as clean.
373  *
374  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
375  * required, then the caller should a) verify that this is really correct,
376  * because _lock() is usually required, and b) hand code it:
377  * set_page_dirty(), unpin_user_page().
378  *
379  */
380 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
381 				      bool make_dirty)
382 {
383 	unsigned long index;
384 	struct page *head;
385 	unsigned int ntails;
386 
387 	for_each_compound_range(index, &page, npages, head, ntails) {
388 		if (make_dirty && !PageDirty(head))
389 			set_page_dirty_lock(head);
390 		put_compound_head(head, ntails, FOLL_PIN);
391 	}
392 }
393 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
394 
395 /**
396  * unpin_user_pages() - release an array of gup-pinned pages.
397  * @pages:  array of pages to be released.
398  * @npages: number of pages in the @pages array.
399  *
400  * For each page in the @pages array, release the page using unpin_user_page().
401  *
402  * Please see the unpin_user_page() documentation for details.
403  */
404 void unpin_user_pages(struct page **pages, unsigned long npages)
405 {
406 	unsigned long index;
407 	struct page *head;
408 	unsigned int ntails;
409 
410 	/*
411 	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
412 	 * leaving them pinned), but probably not. More likely, gup/pup returned
413 	 * a hard -ERRNO error to the caller, who erroneously passed it here.
414 	 */
415 	if (WARN_ON(IS_ERR_VALUE(npages)))
416 		return;
417 
418 	for_each_compound_head(index, pages, npages, head, ntails)
419 		put_compound_head(head, ntails, FOLL_PIN);
420 }
421 EXPORT_SYMBOL(unpin_user_pages);
422 
423 /*
424  * Set MMF_HAS_PINNED if it is not set yet; once set, it stays for the mm's
425  * lifetime.  Avoid setting the bit unless necessary, or the write might cause
426  * cache-line bouncing on large SMP machines for concurrent pinned gups.
427  */
428 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
429 {
430 	if (!test_bit(MMF_HAS_PINNED, mm_flags))
431 		set_bit(MMF_HAS_PINNED, mm_flags);
432 }
433 
434 #ifdef CONFIG_MMU
435 static struct page *no_page_table(struct vm_area_struct *vma,
436 		unsigned int flags)
437 {
438 	/*
439 	 * When core dumping an enormous anonymous area that nobody
440 	 * has touched so far, we don't want to allocate unnecessary pages or
441 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
442 	 * then get_dump_page() will return NULL to leave a hole in the dump.
443 	 * But we can only make this optimization where a hole would surely
444 	 * be zero-filled if handle_mm_fault() actually did handle it.
445 	 */
446 	if ((flags & FOLL_DUMP) &&
447 			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
448 		return ERR_PTR(-EFAULT);
449 	return NULL;
450 }
451 
452 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
453 		pte_t *pte, unsigned int flags)
454 {
455 	/* No page to get reference */
456 	if (flags & FOLL_GET)
457 		return -EFAULT;
458 
459 	if (flags & FOLL_TOUCH) {
460 		pte_t entry = *pte;
461 
462 		if (flags & FOLL_WRITE)
463 			entry = pte_mkdirty(entry);
464 		entry = pte_mkyoung(entry);
465 
466 		if (!pte_same(*pte, entry)) {
467 			set_pte_at(vma->vm_mm, address, pte, entry);
468 			update_mmu_cache(vma, address, pte);
469 		}
470 	}
471 
472 	/* Proper page table entry exists, but no corresponding struct page */
473 	return -EEXIST;
474 }
475 
476 /*
477  * FOLL_FORCE can write even to unwritable PTEs, but only
478  * after we've gone through a COW cycle and they are dirty.
479  */
480 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
481 {
482 	return pte_write(pte) ||
483 		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
484 }
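
/*
 * Editorial note, not part of the original file: the FOLL_COW half of the
 * test above only becomes true after faultin_page() has seen VM_FAULT_WRITE
 * on a vma without VM_WRITE (see the end of faultin_page() below), i.e.
 * after COW has actually been broken for this address; the pte_dirty()
 * check then guards against the PTE having been replaced by a fresh,
 * not-yet-COWed one in the meantime.
 */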
485 
486 static struct page *follow_page_pte(struct vm_area_struct *vma,
487 		unsigned long address, pmd_t *pmd, unsigned int flags,
488 		struct dev_pagemap **pgmap)
489 {
490 	struct mm_struct *mm = vma->vm_mm;
491 	struct page *page;
492 	spinlock_t *ptl;
493 	pte_t *ptep, pte;
494 	int ret;
495 
496 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
497 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
498 			 (FOLL_PIN | FOLL_GET)))
499 		return ERR_PTR(-EINVAL);
500 retry:
501 	if (unlikely(pmd_bad(*pmd)))
502 		return no_page_table(vma, flags);
503 
504 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
505 	pte = *ptep;
506 	if (!pte_present(pte)) {
507 		swp_entry_t entry;
508 		/*
509 		 * KSM's break_ksm() relies upon recognizing a ksm page
510 		 * even while it is being migrated, so for that case we
511 		 * need migration_entry_wait().
512 		 */
513 		if (likely(!(flags & FOLL_MIGRATION)))
514 			goto no_page;
515 		if (pte_none(pte))
516 			goto no_page;
517 		entry = pte_to_swp_entry(pte);
518 		if (!is_migration_entry(entry))
519 			goto no_page;
520 		pte_unmap_unlock(ptep, ptl);
521 		migration_entry_wait(mm, pmd, address);
522 		goto retry;
523 	}
524 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
525 		goto no_page;
526 	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
527 		pte_unmap_unlock(ptep, ptl);
528 		return NULL;
529 	}
530 
531 	page = vm_normal_page(vma, address, pte);
532 	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
533 		/*
534 		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
535 		 * case since they are only valid while holding the pgmap
536 		 * reference.
537 		 */
538 		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
539 		if (*pgmap)
540 			page = pte_page(pte);
541 		else
542 			goto no_page;
543 	} else if (unlikely(!page)) {
544 		if (flags & FOLL_DUMP) {
545 			/* Avoid special (like zero) pages in core dumps */
546 			page = ERR_PTR(-EFAULT);
547 			goto out;
548 		}
549 
550 		if (is_zero_pfn(pte_pfn(pte))) {
551 			page = pte_page(pte);
552 		} else {
553 			ret = follow_pfn_pte(vma, address, ptep, flags);
554 			page = ERR_PTR(ret);
555 			goto out;
556 		}
557 	}
558 
559 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
560 	if (unlikely(!try_grab_page(page, flags))) {
561 		page = ERR_PTR(-ENOMEM);
562 		goto out;
563 	}
564 	/*
565 	 * We need to make the page accessible if and only if we are going
566 	 * to access its content (the FOLL_PIN case).  Please see
567 	 * Documentation/core-api/pin_user_pages.rst for details.
568 	 */
569 	if (flags & FOLL_PIN) {
570 		ret = arch_make_page_accessible(page);
571 		if (ret) {
572 			unpin_user_page(page);
573 			page = ERR_PTR(ret);
574 			goto out;
575 		}
576 	}
577 	if (flags & FOLL_TOUCH) {
578 		if ((flags & FOLL_WRITE) &&
579 		    !pte_dirty(pte) && !PageDirty(page))
580 			set_page_dirty(page);
581 		/*
582 		 * pte_mkyoung() would be more correct here, but atomic care
583 		 * is needed to avoid losing the dirty bit: it is easier to use
584 		 * mark_page_accessed().
585 		 */
586 		mark_page_accessed(page);
587 	}
588 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
589 		/* Do not mlock pte-mapped THP */
590 		if (PageTransCompound(page))
591 			goto out;
592 
593 		/*
594 		 * The preliminary mapping check is mainly to avoid the
595 		 * pointless overhead of lock_page on the ZERO_PAGE
596 		 * which might bounce very badly if there is contention.
597 		 *
598 		 * If the page is already locked, we don't need to
599 		 * handle it now - vmscan will handle it later if and
600 		 * when it attempts to reclaim the page.
601 		 */
602 		if (page->mapping && trylock_page(page)) {
603 			lru_add_drain();  /* push cached pages to LRU */
604 			/*
605 			 * Because we lock page here, and migration is
606 			 * blocked by the pte's page reference, and we
607 			 * know the page is still mapped, we don't even
608 			 * need to check for file-cache page truncation.
609 			 */
610 			mlock_vma_page(page);
611 			unlock_page(page);
612 		}
613 	}
614 out:
615 	pte_unmap_unlock(ptep, ptl);
616 	return page;
617 no_page:
618 	pte_unmap_unlock(ptep, ptl);
619 	if (!pte_none(pte))
620 		return NULL;
621 	return no_page_table(vma, flags);
622 }
623 
624 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
625 				    unsigned long address, pud_t *pudp,
626 				    unsigned int flags,
627 				    struct follow_page_context *ctx)
628 {
629 	pmd_t *pmd, pmdval;
630 	spinlock_t *ptl;
631 	struct page *page;
632 	struct mm_struct *mm = vma->vm_mm;
633 
634 	pmd = pmd_offset(pudp, address);
635 	/*
636 	 * The READ_ONCE() will stabilize the pmdval in a register or
637 	 * on the stack so that it will stop changing under the code.
638 	 */
639 	pmdval = READ_ONCE(*pmd);
640 	if (pmd_none(pmdval))
641 		return no_page_table(vma, flags);
642 	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
643 		page = follow_huge_pmd(mm, address, pmd, flags);
644 		if (page)
645 			return page;
646 		return no_page_table(vma, flags);
647 	}
648 	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
649 		page = follow_huge_pd(vma, address,
650 				      __hugepd(pmd_val(pmdval)), flags,
651 				      PMD_SHIFT);
652 		if (page)
653 			return page;
654 		return no_page_table(vma, flags);
655 	}
656 retry:
657 	if (!pmd_present(pmdval)) {
658 		if (likely(!(flags & FOLL_MIGRATION)))
659 			return no_page_table(vma, flags);
660 		VM_BUG_ON(thp_migration_supported() &&
661 				  !is_pmd_migration_entry(pmdval));
662 		if (is_pmd_migration_entry(pmdval))
663 			pmd_migration_entry_wait(mm, pmd);
664 		pmdval = READ_ONCE(*pmd);
665 		/*
666 		 * MADV_DONTNEED may convert the pmd to null because
667 		 * mmap_lock is held in read mode
668 		 */
669 		if (pmd_none(pmdval))
670 			return no_page_table(vma, flags);
671 		goto retry;
672 	}
673 	if (pmd_devmap(pmdval)) {
674 		ptl = pmd_lock(mm, pmd);
675 		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
676 		spin_unlock(ptl);
677 		if (page)
678 			return page;
679 	}
680 	if (likely(!pmd_trans_huge(pmdval)))
681 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
682 
683 	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
684 		return no_page_table(vma, flags);
685 
686 retry_locked:
687 	ptl = pmd_lock(mm, pmd);
688 	if (unlikely(pmd_none(*pmd))) {
689 		spin_unlock(ptl);
690 		return no_page_table(vma, flags);
691 	}
692 	if (unlikely(!pmd_present(*pmd))) {
693 		spin_unlock(ptl);
694 		if (likely(!(flags & FOLL_MIGRATION)))
695 			return no_page_table(vma, flags);
696 		pmd_migration_entry_wait(mm, pmd);
697 		goto retry_locked;
698 	}
699 	if (unlikely(!pmd_trans_huge(*pmd))) {
700 		spin_unlock(ptl);
701 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
702 	}
703 	if (flags & FOLL_SPLIT_PMD) {
704 		int ret;
705 		page = pmd_page(*pmd);
706 		if (is_huge_zero_page(page)) {
707 			spin_unlock(ptl);
708 			ret = 0;
709 			split_huge_pmd(vma, pmd, address);
710 			if (pmd_trans_unstable(pmd))
711 				ret = -EBUSY;
712 		} else {
713 			spin_unlock(ptl);
714 			split_huge_pmd(vma, pmd, address);
715 			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
716 		}
717 
718 		return ret ? ERR_PTR(ret) :
719 			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
720 	}
721 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
722 	spin_unlock(ptl);
723 	ctx->page_mask = HPAGE_PMD_NR - 1;
724 	return page;
725 }
726 
727 static struct page *follow_pud_mask(struct vm_area_struct *vma,
728 				    unsigned long address, p4d_t *p4dp,
729 				    unsigned int flags,
730 				    struct follow_page_context *ctx)
731 {
732 	pud_t *pud;
733 	spinlock_t *ptl;
734 	struct page *page;
735 	struct mm_struct *mm = vma->vm_mm;
736 
737 	pud = pud_offset(p4dp, address);
738 	if (pud_none(*pud))
739 		return no_page_table(vma, flags);
740 	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
741 		page = follow_huge_pud(mm, address, pud, flags);
742 		if (page)
743 			return page;
744 		return no_page_table(vma, flags);
745 	}
746 	if (is_hugepd(__hugepd(pud_val(*pud)))) {
747 		page = follow_huge_pd(vma, address,
748 				      __hugepd(pud_val(*pud)), flags,
749 				      PUD_SHIFT);
750 		if (page)
751 			return page;
752 		return no_page_table(vma, flags);
753 	}
754 	if (pud_devmap(*pud)) {
755 		ptl = pud_lock(mm, pud);
756 		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
757 		spin_unlock(ptl);
758 		if (page)
759 			return page;
760 	}
761 	if (unlikely(pud_bad(*pud)))
762 		return no_page_table(vma, flags);
763 
764 	return follow_pmd_mask(vma, address, pud, flags, ctx);
765 }
766 
767 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
768 				    unsigned long address, pgd_t *pgdp,
769 				    unsigned int flags,
770 				    struct follow_page_context *ctx)
771 {
772 	p4d_t *p4d;
773 	struct page *page;
774 
775 	p4d = p4d_offset(pgdp, address);
776 	if (p4d_none(*p4d))
777 		return no_page_table(vma, flags);
778 	BUILD_BUG_ON(p4d_huge(*p4d));
779 	if (unlikely(p4d_bad(*p4d)))
780 		return no_page_table(vma, flags);
781 
782 	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
783 		page = follow_huge_pd(vma, address,
784 				      __hugepd(p4d_val(*p4d)), flags,
785 				      P4D_SHIFT);
786 		if (page)
787 			return page;
788 		return no_page_table(vma, flags);
789 	}
790 	return follow_pud_mask(vma, address, p4d, flags, ctx);
791 }
792 
793 /**
794  * follow_page_mask - look up a page descriptor from a user-virtual address
795  * @vma: vm_area_struct mapping @address
796  * @address: virtual address to look up
797  * @flags: flags modifying lookup behaviour
798  * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
799  *       pointer to output page_mask
800  *
801  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
802  *
803  * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
804  * the device's dev_pagemap metadata to avoid repeating expensive lookups.
805  *
806  * On output, the @ctx->page_mask is set according to the size of the page.
807  *
808  * Return: the mapped (struct page *), %NULL if no mapping exists, or
809  * an error pointer if there is a mapping to something not represented
810  * by a page descriptor (see also vm_normal_page()).
811  */
812 static struct page *follow_page_mask(struct vm_area_struct *vma,
813 			      unsigned long address, unsigned int flags,
814 			      struct follow_page_context *ctx)
815 {
816 	pgd_t *pgd;
817 	struct page *page;
818 	struct mm_struct *mm = vma->vm_mm;
819 
820 	ctx->page_mask = 0;
821 
822 	/* make this handle hugepd */
823 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
824 	if (!IS_ERR(page)) {
825 		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
826 		return page;
827 	}
828 
829 	pgd = pgd_offset(mm, address);
830 
831 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
832 		return no_page_table(vma, flags);
833 
834 	if (pgd_huge(*pgd)) {
835 		page = follow_huge_pgd(mm, address, pgd, flags);
836 		if (page)
837 			return page;
838 		return no_page_table(vma, flags);
839 	}
840 	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
841 		page = follow_huge_pd(vma, address,
842 				      __hugepd(pgd_val(*pgd)), flags,
843 				      PGDIR_SHIFT);
844 		if (page)
845 			return page;
846 		return no_page_table(vma, flags);
847 	}
848 
849 	return follow_p4d_mask(vma, address, pgd, flags, ctx);
850 }
851 
852 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
853 			 unsigned int foll_flags)
854 {
855 	struct follow_page_context ctx = { NULL };
856 	struct page *page;
857 
858 	page = follow_page_mask(vma, address, foll_flags, &ctx);
859 	if (ctx.pgmap)
860 		put_dev_pagemap(ctx.pgmap);
861 	return page;
862 }
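
/*
 * Editorial example, not part of the original file: a minimal sketch of how
 * mm-internal code is expected to call follow_page(). The caller must hold
 * mmap_lock across the call and, if it passed FOLL_GET, drop the reference
 * itself.
 *
 *	mmap_read_lock(mm);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	if (!IS_ERR_OR_NULL(page)) {
 *		// ... inspect the page ...
 *		put_page(page);
 *	}
 *	mmap_read_unlock(mm);
 */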
863 
864 static int get_gate_page(struct mm_struct *mm, unsigned long address,
865 		unsigned int gup_flags, struct vm_area_struct **vma,
866 		struct page **page)
867 {
868 	pgd_t *pgd;
869 	p4d_t *p4d;
870 	pud_t *pud;
871 	pmd_t *pmd;
872 	pte_t *pte;
873 	int ret = -EFAULT;
874 
875 	/* user gate pages are read-only */
876 	if (gup_flags & FOLL_WRITE)
877 		return -EFAULT;
878 	if (address > TASK_SIZE)
879 		pgd = pgd_offset_k(address);
880 	else
881 		pgd = pgd_offset_gate(mm, address);
882 	if (pgd_none(*pgd))
883 		return -EFAULT;
884 	p4d = p4d_offset(pgd, address);
885 	if (p4d_none(*p4d))
886 		return -EFAULT;
887 	pud = pud_offset(p4d, address);
888 	if (pud_none(*pud))
889 		return -EFAULT;
890 	pmd = pmd_offset(pud, address);
891 	if (!pmd_present(*pmd))
892 		return -EFAULT;
893 	VM_BUG_ON(pmd_trans_huge(*pmd));
894 	pte = pte_offset_map(pmd, address);
895 	if (pte_none(*pte))
896 		goto unmap;
897 	*vma = get_gate_vma(mm);
898 	if (!page)
899 		goto out;
900 	*page = vm_normal_page(*vma, address, *pte);
901 	if (!*page) {
902 		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
903 			goto unmap;
904 		*page = pte_page(*pte);
905 	}
906 	if (unlikely(!try_grab_page(*page, gup_flags))) {
907 		ret = -ENOMEM;
908 		goto unmap;
909 	}
910 out:
911 	ret = 0;
912 unmap:
913 	pte_unmap(pte);
914 	return ret;
915 }
916 
917 /*
918  * mmap_lock must be held on entry.  If @locked != NULL and *@flags
919  * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
920  * is, *@locked will be set to 0 and -EBUSY returned.
921  */
922 static int faultin_page(struct vm_area_struct *vma,
923 		unsigned long address, unsigned int *flags, int *locked)
924 {
925 	unsigned int fault_flags = 0;
926 	vm_fault_t ret;
927 
928 	/* mlock all present pages, but do not fault in new pages */
929 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
930 		return -ENOENT;
931 	if (*flags & FOLL_WRITE)
932 		fault_flags |= FAULT_FLAG_WRITE;
933 	if (*flags & FOLL_REMOTE)
934 		fault_flags |= FAULT_FLAG_REMOTE;
935 	if (locked)
936 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
937 	if (*flags & FOLL_NOWAIT)
938 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
939 	if (*flags & FOLL_TRIED) {
940 		/*
941 		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
942 		 * can co-exist
943 		 */
944 		fault_flags |= FAULT_FLAG_TRIED;
945 	}
946 
947 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
948 	if (ret & VM_FAULT_ERROR) {
949 		int err = vm_fault_to_errno(ret, *flags);
950 
951 		if (err)
952 			return err;
953 		BUG();
954 	}
955 
956 	if (ret & VM_FAULT_RETRY) {
957 		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
958 			*locked = 0;
959 		return -EBUSY;
960 	}
961 
962 	/*
963 	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
964 	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
965 	 * can thus safely do subsequent page lookups as if they were reads.
966 	 * But only do so when looping for pte_write is futile: in some cases
967 	 * userspace may also be wanting to write to the gotten user page,
968 	 * which a read fault here might prevent (a readonly page might get
969 	 * reCOWed by userspace write).
970 	 */
971 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
972 		*flags |= FOLL_COW;
973 	return 0;
974 }
975 
976 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
977 {
978 	vm_flags_t vm_flags = vma->vm_flags;
979 	int write = (gup_flags & FOLL_WRITE);
980 	int foreign = (gup_flags & FOLL_REMOTE);
981 
982 	if (vm_flags & (VM_IO | VM_PFNMAP))
983 		return -EFAULT;
984 
985 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
986 		return -EFAULT;
987 
988 	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
989 		return -EOPNOTSUPP;
990 
991 	if (write) {
992 		if (!(vm_flags & VM_WRITE)) {
993 			if (!(gup_flags & FOLL_FORCE))
994 				return -EFAULT;
995 			/*
996 			 * We used to let the write,force case do COW in a
997 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
998 			 * set a breakpoint in a read-only mapping of an
999 			 * executable, without corrupting the file (yet only
1000 			 * when that file had been opened for writing!).
1001 			 * Anon pages in shared mappings are surprising: now
1002 			 * just reject it.
1003 			 */
1004 			if (!is_cow_mapping(vm_flags))
1005 				return -EFAULT;
1006 		}
1007 	} else if (!(vm_flags & VM_READ)) {
1008 		if (!(gup_flags & FOLL_FORCE))
1009 			return -EFAULT;
1010 		/*
1011 		 * Is there actually any vma we can reach here which does not
1012 		 * have VM_MAYREAD set?
1013 		 */
1014 		if (!(vm_flags & VM_MAYREAD))
1015 			return -EFAULT;
1016 	}
1017 	/*
1018 	 * gups are always data accesses, not instruction
1019 	 * fetches, so execute=false here
1020 	 */
1021 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1022 		return -EFAULT;
1023 	return 0;
1024 }
1025 
1026 /**
1027  * __get_user_pages() - pin user pages in memory
1028  * @mm:		mm_struct of target mm
1029  * @start:	starting user address
1030  * @nr_pages:	number of pages from start to pin
1031  * @gup_flags:	flags modifying pin behaviour
1032  * @pages:	array that receives pointers to the pages pinned.
1033  *		Should be at least nr_pages long. Or NULL, if caller
1034  *		only intends to ensure the pages are faulted in.
1035  * @vmas:	array of pointers to vmas corresponding to each page.
1036  *		Or NULL if the caller does not require them.
1037  * @locked:     whether we're still with the mmap_lock held
1038  *
1039  * Returns either number of pages pinned (which may be less than the
1040  * number requested), or an error. Details about the return value:
1041  *
1042  * -- If nr_pages is 0, returns 0.
1043  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1044  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1045  *    pages pinned. Again, this may be less than nr_pages.
1046  * -- 0 return value is possible when the fault would need to be retried.
1047  *
1048  * The caller is responsible for releasing returned @pages, via put_page().
1049  *
1050  * @vmas are valid only as long as mmap_lock is held.
1051  *
1052  * Must be called with mmap_lock held.  It may be released.  See below.
1053  *
1054  * __get_user_pages walks a process's page tables and takes a reference to
1055  * each struct page that each user address corresponds to at a given
1056  * instant. That is, it takes the page that would be accessed if a user
1057  * thread accesses the given user virtual address at that instant.
1058  *
1059  * This does not guarantee that the page exists in the user mappings when
1060  * __get_user_pages returns, and there may even be a completely different
1061  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1062  * and subsequently re-faulted). However, it does guarantee that the page
1063  * won't be freed completely. And mostly callers simply care that the page
1064  * contains data that was valid *at some point in time*. Typically, an IO
1065  * or similar operation cannot guarantee anything stronger anyway because
1066  * locks can't be held over the syscall boundary.
1067  *
1068  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1069  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1070  * appropriate) must be called after the page is finished with, and
1071  * before put_page is called.
1072  *
1073  * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
1074  * released by an up_read().  That can happen if @gup_flags does not
1075  * have FOLL_NOWAIT.
1076  *
1077  * A caller using such a combination of @locked and @gup_flags
1078  * must therefore hold the mmap_lock for reading only, and recognize
1079  * when it's been released.  Otherwise, it must be held for either
1080  * reading or writing and will not be released.
1081  *
1082  * In most cases, get_user_pages or get_user_pages_fast should be used
1083  * instead of __get_user_pages. __get_user_pages should be used only if
1084  * you need some special @gup_flags.
1085  */
1086 static long __get_user_pages(struct mm_struct *mm,
1087 		unsigned long start, unsigned long nr_pages,
1088 		unsigned int gup_flags, struct page **pages,
1089 		struct vm_area_struct **vmas, int *locked)
1090 {
1091 	long ret = 0, i = 0;
1092 	struct vm_area_struct *vma = NULL;
1093 	struct follow_page_context ctx = { NULL };
1094 
1095 	if (!nr_pages)
1096 		return 0;
1097 
1098 	start = untagged_addr(start);
1099 
1100 	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1101 
1102 	/*
1103 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
1104 	 * fault information is unrelated to the reference behaviour of a task
1105 	 * using the address space
1106 	 */
1107 	if (!(gup_flags & FOLL_FORCE))
1108 		gup_flags |= FOLL_NUMA;
1109 
1110 	do {
1111 		struct page *page;
1112 		unsigned int foll_flags = gup_flags;
1113 		unsigned int page_increm;
1114 
1115 		/* first iteration or crossing a vma boundary */
1116 		if (!vma || start >= vma->vm_end) {
1117 			vma = find_extend_vma(mm, start);
1118 			if (!vma && in_gate_area(mm, start)) {
1119 				ret = get_gate_page(mm, start & PAGE_MASK,
1120 						gup_flags, &vma,
1121 						pages ? &pages[i] : NULL);
1122 				if (ret)
1123 					goto out;
1124 				ctx.page_mask = 0;
1125 				goto next_page;
1126 			}
1127 
1128 			if (!vma) {
1129 				ret = -EFAULT;
1130 				goto out;
1131 			}
1132 			ret = check_vma_flags(vma, gup_flags);
1133 			if (ret)
1134 				goto out;
1135 
1136 			if (is_vm_hugetlb_page(vma)) {
1137 				i = follow_hugetlb_page(mm, vma, pages, vmas,
1138 						&start, &nr_pages, i,
1139 						gup_flags, locked);
1140 				if (locked && *locked == 0) {
1141 					/*
1142 					 * We've got a VM_FAULT_RETRY
1143 					 * and we've lost mmap_lock.
1144 					 * We must stop here.
1145 					 */
1146 					BUG_ON(gup_flags & FOLL_NOWAIT);
1147 					BUG_ON(ret != 0);
1148 					goto out;
1149 				}
1150 				continue;
1151 			}
1152 		}
1153 retry:
1154 		/*
1155 		 * If we have a pending SIGKILL, don't keep faulting pages and
1156 		 * potentially allocating memory.
1157 		 */
1158 		if (fatal_signal_pending(current)) {
1159 			ret = -EINTR;
1160 			goto out;
1161 		}
1162 		cond_resched();
1163 
1164 		page = follow_page_mask(vma, start, foll_flags, &ctx);
1165 		if (!page) {
1166 			ret = faultin_page(vma, start, &foll_flags, locked);
1167 			switch (ret) {
1168 			case 0:
1169 				goto retry;
1170 			case -EBUSY:
1171 				ret = 0;
1172 				fallthrough;
1173 			case -EFAULT:
1174 			case -ENOMEM:
1175 			case -EHWPOISON:
1176 				goto out;
1177 			case -ENOENT:
1178 				goto next_page;
1179 			}
1180 			BUG();
1181 		} else if (PTR_ERR(page) == -EEXIST) {
1182 			/*
1183 			 * Proper page table entry exists, but no corresponding
1184 			 * struct page.
1185 			 */
1186 			goto next_page;
1187 		} else if (IS_ERR(page)) {
1188 			ret = PTR_ERR(page);
1189 			goto out;
1190 		}
1191 		if (pages) {
1192 			pages[i] = page;
1193 			flush_anon_page(vma, page, start);
1194 			flush_dcache_page(page);
1195 			ctx.page_mask = 0;
1196 		}
1197 next_page:
1198 		if (vmas) {
1199 			vmas[i] = vma;
1200 			ctx.page_mask = 0;
1201 		}
1202 		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1203 		if (page_increm > nr_pages)
1204 			page_increm = nr_pages;
1205 		i += page_increm;
1206 		start += page_increm * PAGE_SIZE;
1207 		nr_pages -= page_increm;
1208 	} while (nr_pages);
1209 out:
1210 	if (ctx.pgmap)
1211 		put_dev_pagemap(ctx.pgmap);
1212 	return i ? i : ret;
1213 }
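
/*
 * Editorial example, not part of the original file: the dirty/put protocol
 * that the __get_user_pages() kernel-doc above describes, sketched with the
 * public get_user_pages() wrapper (error handling kept minimal).
 *
 *	got = get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
 *	for (i = 0; i < got; i++) {
 *		// ... write into pages[i] ...
 *		set_page_dirty_lock(pages[i]);	// before dropping the ref
 *		put_page(pages[i]);
 *	}
 *
 * Callers that hold the pages for DMA should use the pin_user_pages*() /
 * unpin_user_pages_dirty_lock() APIs instead, as noted earlier in this file.
 */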
1214 
1215 static bool vma_permits_fault(struct vm_area_struct *vma,
1216 			      unsigned int fault_flags)
1217 {
1218 	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
1219 	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1220 	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1221 
1222 	if (!(vm_flags & vma->vm_flags))
1223 		return false;
1224 
1225 	/*
1226 	 * The architecture might have a hardware protection
1227 	 * mechanism other than read/write that can deny access.
1228 	 *
1229 	 * gup always represents data access, not instruction
1230 	 * fetches, so execute=false here:
1231 	 */
1232 	if (!arch_vma_access_permitted(vma, write, false, foreign))
1233 		return false;
1234 
1235 	return true;
1236 }
1237 
1238 /**
1239  * fixup_user_fault() - manually resolve a user page fault
1240  * @mm:		mm_struct of target mm
1241  * @address:	user address
1242  * @fault_flags: flags to pass down to handle_mm_fault()
1243  * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
1244  *		does not allow retry. If NULL, the caller must guarantee
1245  *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1246  *
1247  * This is meant to be called in the specific scenario where, for locking reasons,
1248  * we try to access user memory in atomic context (within a pagefault_disable()
1249  * section), that access returns -EFAULT, and we want to resolve the user fault
1250  * before trying again.
1251  *
1252  * Typically this is meant to be used by the futex code.
1253  *
1254  * The main difference with get_user_pages() is that this function will
1255  * unconditionally call handle_mm_fault() which will in turn perform all the
1256  * necessary SW fixup of the dirty and young bits in the PTE, while
1257  * get_user_pages() only guarantees to update these in the struct page.
1258  *
1259  * This is important for some architectures where those bits also gate the
1260  * access permission to the page because they are maintained in software.  On
1261  * such architectures, gup() will not be enough to make a subsequent access
1262  * succeed.
1263  *
1264  * This function will not return with an unlocked mmap_lock. So it does not have
1265  * the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
1266  */
1267 int fixup_user_fault(struct mm_struct *mm,
1268 		     unsigned long address, unsigned int fault_flags,
1269 		     bool *unlocked)
1270 {
1271 	struct vm_area_struct *vma;
1272 	vm_fault_t ret, major = 0;
1273 
1274 	address = untagged_addr(address);
1275 
1276 	if (unlocked)
1277 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1278 
1279 retry:
1280 	vma = find_extend_vma(mm, address);
1281 	if (!vma || address < vma->vm_start)
1282 		return -EFAULT;
1283 
1284 	if (!vma_permits_fault(vma, fault_flags))
1285 		return -EFAULT;
1286 
1287 	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1288 	    fatal_signal_pending(current))
1289 		return -EINTR;
1290 
1291 	ret = handle_mm_fault(vma, address, fault_flags, NULL);
1292 	major |= ret & VM_FAULT_MAJOR;
1293 	if (ret & VM_FAULT_ERROR) {
1294 		int err = vm_fault_to_errno(ret, 0);
1295 
1296 		if (err)
1297 			return err;
1298 		BUG();
1299 	}
1300 
1301 	if (ret & VM_FAULT_RETRY) {
1302 		mmap_read_lock(mm);
1303 		*unlocked = true;
1304 		fault_flags |= FAULT_FLAG_TRIED;
1305 		goto retry;
1306 	}
1307 
1308 	return 0;
1309 }
1310 EXPORT_SYMBOL_GPL(fixup_user_fault);
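
/*
 * Editorial example, not part of the original file: the futex-style retry
 * loop that the fixup_user_fault() kernel-doc above alludes to. Declarations
 * are omitted and the access is a read; a write access would pass
 * FAULT_FLAG_WRITE instead of 0.
 *
 *	for (;;) {
 *		pagefault_disable();
 *		ret = __get_user(val, uaddr);	// atomic user access
 *		pagefault_enable();
 *		if (!ret)
 *			break;			// got the value
 *
 *		mmap_read_lock(mm);
 *		ret = fixup_user_fault(mm, (unsigned long)uaddr, 0, &unlocked);
 *		mmap_read_unlock(mm);
 *		if (ret)
 *			return ret;		// unresolvable fault
 *	}
 */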
1311 
1312 /*
1313  * Please note that this function, unlike __get_user_pages(), will not
1314  * return 0 for nr_pages > 0 without FOLL_NOWAIT.
1315  */
1316 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1317 						unsigned long start,
1318 						unsigned long nr_pages,
1319 						struct page **pages,
1320 						struct vm_area_struct **vmas,
1321 						int *locked,
1322 						unsigned int flags)
1323 {
1324 	long ret, pages_done;
1325 	bool lock_dropped;
1326 
1327 	if (locked) {
1328 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
1329 		BUG_ON(vmas);
1330 		/* check caller initialized locked */
1331 		BUG_ON(*locked != 1);
1332 	}
1333 
1334 	if (flags & FOLL_PIN)
1335 		mm_set_has_pinned_flag(&mm->flags);
1336 
1337 	/*
1338 	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1339 	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1340 	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1341 	 * for FOLL_GET, not for the newer FOLL_PIN.
1342 	 *
1343 	 * FOLL_PIN always expects pages to be non-null, but no need to assert
1344 	 * that here, as any failures will be obvious enough.
1345 	 */
1346 	if (pages && !(flags & FOLL_PIN))
1347 		flags |= FOLL_GET;
1348 
1349 	pages_done = 0;
1350 	lock_dropped = false;
1351 	for (;;) {
1352 		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1353 				       vmas, locked);
1354 		if (!locked)
1355 			/* VM_FAULT_RETRY couldn't trigger, bypass */
1356 			return ret;
1357 
1358 		/* VM_FAULT_RETRY cannot return errors */
1359 		if (!*locked) {
1360 			BUG_ON(ret < 0);
1361 			BUG_ON(ret >= nr_pages);
1362 		}
1363 
1364 		if (ret > 0) {
1365 			nr_pages -= ret;
1366 			pages_done += ret;
1367 			if (!nr_pages)
1368 				break;
1369 		}
1370 		if (*locked) {
1371 			/*
1372 			 * VM_FAULT_RETRY didn't trigger or it was a
1373 			 * FOLL_NOWAIT.
1374 			 */
1375 			if (!pages_done)
1376 				pages_done = ret;
1377 			break;
1378 		}
1379 		/*
1380 		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1381 		 * For the prefault case (!pages) we only update counts.
1382 		 */
1383 		if (likely(pages))
1384 			pages += ret;
1385 		start += ret << PAGE_SHIFT;
1386 		lock_dropped = true;
1387 
1388 retry:
1389 		/*
1390 		 * Repeat on the address that fired VM_FAULT_RETRY
1391 		 * with both FAULT_FLAG_ALLOW_RETRY and
1392 		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1393 		 * by fatal signals, so we need to check it before we
1394 		 * start trying again otherwise it can loop forever.
1395 		 */
1396 
1397 		if (fatal_signal_pending(current)) {
1398 			if (!pages_done)
1399 				pages_done = -EINTR;
1400 			break;
1401 		}
1402 
1403 		ret = mmap_read_lock_killable(mm);
1404 		if (ret) {
1405 			BUG_ON(ret > 0);
1406 			if (!pages_done)
1407 				pages_done = ret;
1408 			break;
1409 		}
1410 
1411 		*locked = 1;
1412 		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1413 				       pages, NULL, locked);
1414 		if (!*locked) {
1415 			/* Continue to retry until we succeed */
1416 			BUG_ON(ret != 0);
1417 			goto retry;
1418 		}
1419 		if (ret != 1) {
1420 			BUG_ON(ret > 1);
1421 			if (!pages_done)
1422 				pages_done = ret;
1423 			break;
1424 		}
1425 		nr_pages--;
1426 		pages_done++;
1427 		if (!nr_pages)
1428 			break;
1429 		if (likely(pages))
1430 			pages++;
1431 		start += PAGE_SIZE;
1432 	}
1433 	if (lock_dropped && *locked) {
1434 		/*
1435 		 * We must let the caller know we temporarily dropped the lock
1436 		 * and so the critical section protected by it was lost.
1437 		 */
1438 		mmap_read_unlock(mm);
1439 		*locked = 0;
1440 	}
1441 	return pages_done;
1442 }
1443 
1444 /**
1445  * populate_vma_page_range() -  populate a range of pages in the vma.
1446  * @vma:   target vma
1447  * @start: start address
1448  * @end:   end address
1449  * @locked: whether the mmap_lock is still held
1450  *
1451  * This takes care of mlocking the pages too if VM_LOCKED is set.
1452  *
1453  * Return either number of pages pinned in the vma, or a negative error
1454  * code on error.
1455  *
1456  * vma->vm_mm->mmap_lock must be held.
1457  *
1458  * If @locked is NULL, it may be held for read or write and will
1459  * be unperturbed.
1460  *
1461  * If @locked is non-NULL, it must be held for read only and may be
1462  * released.  If it's released, *@locked will be set to 0.
1463  */
1464 long populate_vma_page_range(struct vm_area_struct *vma,
1465 		unsigned long start, unsigned long end, int *locked)
1466 {
1467 	struct mm_struct *mm = vma->vm_mm;
1468 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1469 	int gup_flags;
1470 
1471 	VM_BUG_ON(start & ~PAGE_MASK);
1472 	VM_BUG_ON(end   & ~PAGE_MASK);
1473 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1474 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1475 	mmap_assert_locked(mm);
1476 
1477 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1478 	if (vma->vm_flags & VM_LOCKONFAULT)
1479 		gup_flags &= ~FOLL_POPULATE;
1480 	/*
1481 	 * We want to touch writable mappings with a write fault in order
1482 	 * to break COW, except for shared mappings because these don't COW
1483 	 * and we would not want to dirty them for nothing.
1484 	 */
1485 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1486 		gup_flags |= FOLL_WRITE;
1487 
1488 	/*
1489 	 * We want mlock to succeed for regions that have any permissions
1490 	 * other than PROT_NONE.
1491 	 */
1492 	if (vma_is_accessible(vma))
1493 		gup_flags |= FOLL_FORCE;
1494 
1495 	/*
1496 	 * We made sure addr is within a VMA, so the following will
1497 	 * not result in a stack expansion that recurses back here.
1498 	 */
1499 	return __get_user_pages(mm, start, nr_pages, gup_flags,
1500 				NULL, NULL, locked);
1501 }
1502 
1503 /*
1504  * __mm_populate - populate and/or mlock pages within a range of address space.
1505  *
1506  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1507  * flags. VMAs must be already marked with the desired vm_flags, and
1508  * mmap_lock must not be held.
1509  */
1510 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1511 {
1512 	struct mm_struct *mm = current->mm;
1513 	unsigned long end, nstart, nend;
1514 	struct vm_area_struct *vma = NULL;
1515 	int locked = 0;
1516 	long ret = 0;
1517 
1518 	end = start + len;
1519 
1520 	for (nstart = start; nstart < end; nstart = nend) {
1521 		/*
1522 		 * We want to fault in pages for [nstart; end) address range.
1523 		 * Find first corresponding VMA.
1524 		 */
1525 		if (!locked) {
1526 			locked = 1;
1527 			mmap_read_lock(mm);
1528 			vma = find_vma(mm, nstart);
1529 		} else if (nstart >= vma->vm_end)
1530 			vma = vma->vm_next;
1531 		if (!vma || vma->vm_start >= end)
1532 			break;
1533 		/*
1534 		 * Set [nstart; nend) to intersection of desired address
1535 		 * range with the first VMA. Also, skip undesirable VMA types.
1536 		 */
1537 		nend = min(end, vma->vm_end);
1538 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1539 			continue;
1540 		if (nstart < vma->vm_start)
1541 			nstart = vma->vm_start;
1542 		/*
1543 		 * Now fault in a range of pages. populate_vma_page_range()
1544 		 * double checks the vma flags, so that it won't mlock pages
1545 		 * if the vma was already munlocked.
1546 		 */
1547 		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1548 		if (ret < 0) {
1549 			if (ignore_errors) {
1550 				ret = 0;
1551 				continue;	/* continue at next VMA */
1552 			}
1553 			break;
1554 		}
1555 		nend = nstart + ret * PAGE_SIZE;
1556 		ret = 0;
1557 	}
1558 	if (locked)
1559 		mmap_read_unlock(mm);
1560 	return ret;	/* 0 or negative error code */
1561 }
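
/*
 * Editorial example, not part of the original file: the two flavours of
 * caller that the __mm_populate() comment above mentions, sketched here;
 * the real mlock()/mmap() call sites live elsewhere in mm/.
 *
 *	// mmap(MAP_POPULATE / MAP_LOCKED)-style caller: best effort
 *	(void)__mm_populate(addr, len, 1);
 *
 *	// mlock()-style caller: report failures such as -ENOMEM
 *	err = __mm_populate(start, len, 0);
 */
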
1562 #else /* CONFIG_MMU */
1563 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1564 		unsigned long nr_pages, struct page **pages,
1565 		struct vm_area_struct **vmas, int *locked,
1566 		unsigned int foll_flags)
1567 {
1568 	struct vm_area_struct *vma;
1569 	unsigned long vm_flags;
1570 	long i;
1571 
1572 	/* calculate required read or write permissions.
1573 	 * If FOLL_FORCE is set, we only require the "MAY" flags.
1574 	 */
1575 	vm_flags  = (foll_flags & FOLL_WRITE) ?
1576 			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1577 	vm_flags &= (foll_flags & FOLL_FORCE) ?
1578 			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1579 
1580 	for (i = 0; i < nr_pages; i++) {
1581 		vma = find_vma(mm, start);
1582 		if (!vma)
1583 			goto finish_or_fault;
1584 
1585 		/* protect what we can, including chardevs */
1586 		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1587 		    !(vm_flags & vma->vm_flags))
1588 			goto finish_or_fault;
1589 
1590 		if (pages) {
1591 			pages[i] = virt_to_page(start);
1592 			if (pages[i])
1593 				get_page(pages[i]);
1594 		}
1595 		if (vmas)
1596 			vmas[i] = vma;
1597 		start = (start + PAGE_SIZE) & PAGE_MASK;
1598 	}
1599 
1600 	return i;
1601 
1602 finish_or_fault:
1603 	return i ? : -EFAULT;
1604 }
1605 #endif /* !CONFIG_MMU */
1606 
1607 /**
1608  * get_dump_page() - pin user page in memory while writing it to core dump
1609  * @addr: user address
1610  *
1611  * Returns struct page pointer of user page pinned for dump,
1612  * to be freed afterwards by put_page().
1613  *
1614  * Returns NULL on any kind of failure - a hole must then be inserted into
1615  * the corefile, to preserve alignment with its headers; and also returns
1616  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1617  * allowing a hole to be left in the corefile to save disk space.
1618  *
1619  * Called without mmap_lock (takes and releases the mmap_lock by itself).
1620  */
1621 #ifdef CONFIG_ELF_CORE
1622 struct page *get_dump_page(unsigned long addr)
1623 {
1624 	struct mm_struct *mm = current->mm;
1625 	struct page *page;
1626 	int locked = 1;
1627 	int ret;
1628 
1629 	if (mmap_read_lock_killable(mm))
1630 		return NULL;
1631 	ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1632 				      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1633 	if (locked)
1634 		mmap_read_unlock(mm);
1635 	return (ret == 1) ? page : NULL;
1636 }
1637 #endif /* CONFIG_ELF_CORE */
1638 
1639 #ifdef CONFIG_MIGRATION
1640 /*
1641  * Check whether all pages are pinnable; if so, return the number of pages.  If
1642  * some pages are not pinnable, migrate them and unpin all pages. Return zero if
1643  * pages were migrated, or if some pages were not successfully isolated.
1644  * Return a negative error if migration fails.
1645  */
1646 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1647 					    struct page **pages,
1648 					    unsigned int gup_flags)
1649 {
1650 	unsigned long i;
1651 	unsigned long isolation_error_count = 0;
1652 	bool drain_allow = true;
1653 	LIST_HEAD(movable_page_list);
1654 	long ret = 0;
1655 	struct page *prev_head = NULL;
1656 	struct page *head;
1657 	struct migration_target_control mtc = {
1658 		.nid = NUMA_NO_NODE,
1659 		.gfp_mask = GFP_USER | __GFP_NOWARN,
1660 	};
1661 
1662 	for (i = 0; i < nr_pages; i++) {
1663 		head = compound_head(pages[i]);
1664 		if (head == prev_head)
1665 			continue;
1666 		prev_head = head;
1667 		/*
1668 		 * If we get a movable page, since we are going to be pinning
1669 		 * these entries, try to move them out if possible.
1670 		 */
1671 		if (!is_pinnable_page(head)) {
1672 			if (PageHuge(head)) {
1673 				if (!isolate_huge_page(head, &movable_page_list))
1674 					isolation_error_count++;
1675 			} else {
1676 				if (!PageLRU(head) && drain_allow) {
1677 					lru_add_drain_all();
1678 					drain_allow = false;
1679 				}
1680 
1681 				if (isolate_lru_page(head)) {
1682 					isolation_error_count++;
1683 					continue;
1684 				}
1685 				list_add_tail(&head->lru, &movable_page_list);
1686 				mod_node_page_state(page_pgdat(head),
1687 						    NR_ISOLATED_ANON +
1688 						    page_is_file_lru(head),
1689 						    thp_nr_pages(head));
1690 			}
1691 		}
1692 	}
1693 
1694 	/*
1695 	 * If the list is empty and there were no isolation errors, then all
1696 	 * pages are in the correct zone.
1697 	 */
1698 	if (list_empty(&movable_page_list) && !isolation_error_count)
1699 		return nr_pages;
1700 
1701 	if (gup_flags & FOLL_PIN) {
1702 		unpin_user_pages(pages, nr_pages);
1703 	} else {
1704 		for (i = 0; i < nr_pages; i++)
1705 			put_page(pages[i]);
1706 	}
1707 	if (!list_empty(&movable_page_list)) {
1708 		ret = migrate_pages(&movable_page_list, alloc_migration_target,
1709 				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1710 				    MR_LONGTERM_PIN);
1711 		if (ret && !list_empty(&movable_page_list))
1712 			putback_movable_pages(&movable_page_list);
1713 	}
1714 
1715 	return ret > 0 ? -ENOMEM : ret;
1716 }
1717 #else
1718 static long check_and_migrate_movable_pages(unsigned long nr_pages,
1719 					    struct page **pages,
1720 					    unsigned int gup_flags)
1721 {
1722 	return nr_pages;
1723 }
1724 #endif /* CONFIG_MIGRATION */
1725 
1726 /*
1727  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1728  * allows us to process the FOLL_LONGTERM flag.
1729  */
1730 static long __gup_longterm_locked(struct mm_struct *mm,
1731 				  unsigned long start,
1732 				  unsigned long nr_pages,
1733 				  struct page **pages,
1734 				  struct vm_area_struct **vmas,
1735 				  unsigned int gup_flags)
1736 {
1737 	unsigned int flags;
1738 	long rc;
1739 
1740 	if (!(gup_flags & FOLL_LONGTERM))
1741 		return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1742 					       NULL, gup_flags);
1743 	flags = memalloc_pin_save();
1744 	do {
1745 		rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1746 					     NULL, gup_flags);
1747 		if (rc <= 0)
1748 			break;
1749 		rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1750 	} while (!rc);
1751 	memalloc_pin_restore(flags);
1752 
1753 	return rc;
1754 }
1755 
1756 static bool is_valid_gup_flags(unsigned int gup_flags)
1757 {
1758 	/*
1759 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1760 	 * never directly by the caller, so enforce that with an assertion:
1761 	 */
1762 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1763 		return false;
1764 	/*
1765 	 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Put another way,
1766 	 * FOLL_LONGTERM is a specific, more restrictive case of
1767 	 * FOLL_PIN.
1768 	 */
1769 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1770 		return false;
1771 
1772 	return true;
1773 }
1774 
1775 #ifdef CONFIG_MMU
1776 static long __get_user_pages_remote(struct mm_struct *mm,
1777 				    unsigned long start, unsigned long nr_pages,
1778 				    unsigned int gup_flags, struct page **pages,
1779 				    struct vm_area_struct **vmas, int *locked)
1780 {
1781 	/*
1782 	 * Parts of FOLL_LONGTERM behavior are incompatible with
1783 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1784 	 * vmas. However, this only comes up if locked is set, and there are
1785 	 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1786 	 * allow what we can.
1787 	 */
1788 	if (gup_flags & FOLL_LONGTERM) {
1789 		if (WARN_ON_ONCE(locked))
1790 			return -EINVAL;
1791 		/*
1792 		 * This will check the vmas (even if our vmas arg is NULL)
1793 		 * and return -ENOTSUPP if DAX isn't allowed in this case:
1794 		 */
1795 		return __gup_longterm_locked(mm, start, nr_pages, pages,
1796 					     vmas, gup_flags | FOLL_TOUCH |
1797 					     FOLL_REMOTE);
1798 	}
1799 
1800 	return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1801 				       locked,
1802 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1803 }
1804 
1805 /**
1806  * get_user_pages_remote() - pin user pages in memory
1807  * @mm:		mm_struct of target mm
1808  * @start:	starting user address
1809  * @nr_pages:	number of pages from start to pin
1810  * @gup_flags:	flags modifying lookup behaviour
1811  * @pages:	array that receives pointers to the pages pinned.
1812  *		Should be at least nr_pages long. Or NULL, if caller
1813  *		only intends to ensure the pages are faulted in.
1814  * @vmas:	array of pointers to vmas corresponding to each page.
1815  *		Or NULL if the caller does not require them.
1816  * @locked:	pointer to lock flag indicating whether lock is held and
1817  *		subsequently whether VM_FAULT_RETRY functionality can be
1818  *		utilised. Lock must initially be held.
1819  *
1820  * Returns either number of pages pinned (which may be less than the
1821  * number requested), or an error. Details about the return value:
1822  *
1823  * -- If nr_pages is 0, returns 0.
1824  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1825  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1826  *    pages pinned. Again, this may be less than nr_pages.
1827  *
1828  * The caller is responsible for releasing returned @pages, via put_page().
1829  *
1830  * @vmas are valid only as long as mmap_lock is held.
1831  *
1832  * Must be called with mmap_lock held for read or write.
1833  *
1834  * get_user_pages_remote walks a process's page tables and takes a reference
1835  * to each struct page that each user address corresponds to at a given
1836  * instant. That is, it takes the page that would be accessed if a user
1837  * thread accesses the given user virtual address at that instant.
1838  *
1839  * This does not guarantee that the page exists in the user mappings when
1840  * get_user_pages_remote returns, and there may even be a completely different
1841  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1842  * and subsequently re-faulted). However, it does guarantee that the page
1843  * won't be freed completely. Most callers simply care that the page
1844  * contains data that was valid *at some point in time*. Typically, an IO
1845  * or similar operation cannot guarantee anything stronger anyway because
1846  * locks can't be held over the syscall boundary.
1847  *
1848  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1849  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1850  * be called after the page is finished with, and before put_page is called.
1851  *
1852  * get_user_pages_remote is typically used for fewer-copy IO operations,
1853  * to get a handle on the memory by some means other than accesses
1854  * via the user virtual addresses. The pages may be submitted for
1855  * DMA to devices or accessed via their kernel linear mapping (via the
1856  * kmap APIs). Care should be taken to use the correct cache flushing APIs.
1857  *
1858  * See also get_user_pages_fast, for performance critical applications.
1859  *
1860  * Use of this function should be phased out in favor of
1861  * get_user_pages_locked|unlocked or get_user_pages_fast where possible,
1862  * since callers that do not pass @locked cannot pass
1863  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1864  */
1865 long get_user_pages_remote(struct mm_struct *mm,
1866 		unsigned long start, unsigned long nr_pages,
1867 		unsigned int gup_flags, struct page **pages,
1868 		struct vm_area_struct **vmas, int *locked)
1869 {
1870 	if (!is_valid_gup_flags(gup_flags))
1871 		return -EINVAL;
1872 
1873 	return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
1874 				       pages, vmas, locked);
1875 }
1876 EXPORT_SYMBOL(get_user_pages_remote);
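
/*
 * Illustrative sketch, not taken from an in-tree caller: a typical use of
 * get_user_pages_remote(). The names mm, uaddr, pages and i below are
 * hypothetical caller-side variables:
 *
 *	struct page *pages[4];
 *	long pinned, i;
 *
 *	mmap_read_lock(mm);
 *	pinned = get_user_pages_remote(mm, uaddr, 4, FOLL_WRITE, pages,
 *				       NULL, NULL);
 *	mmap_read_unlock(mm);
 *
 *	for (i = 0; i < pinned; i++) {
 *		... read or modify the page contents ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */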
1877 
1878 #else /* CONFIG_MMU */
1879 long get_user_pages_remote(struct mm_struct *mm,
1880 			   unsigned long start, unsigned long nr_pages,
1881 			   unsigned int gup_flags, struct page **pages,
1882 			   struct vm_area_struct **vmas, int *locked)
1883 {
1884 	return 0;
1885 }
1886 
1887 static long __get_user_pages_remote(struct mm_struct *mm,
1888 				    unsigned long start, unsigned long nr_pages,
1889 				    unsigned int gup_flags, struct page **pages,
1890 				    struct vm_area_struct **vmas, int *locked)
1891 {
1892 	return 0;
1893 }
1894 #endif /* !CONFIG_MMU */
1895 
1896 /**
1897  * get_user_pages() - pin user pages in memory
1898  * @start:      starting user address
1899  * @nr_pages:   number of pages from start to pin
1900  * @gup_flags:  flags modifying lookup behaviour
1901  * @pages:      array that receives pointers to the pages pinned.
1902  *              Should be at least nr_pages long. Or NULL, if caller
1903  *              only intends to ensure the pages are faulted in.
1904  * @vmas:       array of pointers to vmas corresponding to each page.
1905  *              Or NULL if the caller does not require them.
1906  *
1907  * This is the same as get_user_pages_remote(), just with a less-flexible
1908  * calling convention where we assume that the mm being operated on belongs to
1909  * the current task, and doesn't allow passing of a locked parameter.  We also
1910  * obviously don't pass FOLL_REMOTE in here.
1911  */
1912 long get_user_pages(unsigned long start, unsigned long nr_pages,
1913 		unsigned int gup_flags, struct page **pages,
1914 		struct vm_area_struct **vmas)
1915 {
1916 	if (!is_valid_gup_flags(gup_flags))
1917 		return -EINVAL;
1918 
1919 	return __gup_longterm_locked(current->mm, start, nr_pages,
1920 				     pages, vmas, gup_flags | FOLL_TOUCH);
1921 }
1922 EXPORT_SYMBOL(get_user_pages);
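
/*
 * Illustrative sketch, not taken from an in-tree caller: since
 * get_user_pages() implicitly operates on current->mm, faulting in and
 * grabbing a single page might look like this (uaddr and page are
 * hypothetical caller-side variables):
 *
 *	struct page *page;
 *	long got;
 *
 *	mmap_read_lock(current->mm);
 *	got = get_user_pages(uaddr, 1, FOLL_WRITE, &page, NULL);
 *	mmap_read_unlock(current->mm);
 *
 *	if (got == 1) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */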
1923 
1924 /**
1925  * get_user_pages_locked() - variant of get_user_pages()
1926  *
1927  * @start:      starting user address
1928  * @nr_pages:   number of pages from start to pin
1929  * @gup_flags:  flags modifying lookup behaviour
1930  * @pages:      array that receives pointers to the pages pinned.
1931  *              Should be at least nr_pages long. Or NULL, if caller
1932  *              only intends to ensure the pages are faulted in.
1933  * @locked:     pointer to lock flag indicating whether lock is held and
1934  *              subsequently whether VM_FAULT_RETRY functionality can be
1935  *              utilised. Lock must initially be held.
1936  *
1937  * It is suitable to replace the form:
1938  *
1939  *      mmap_read_lock(mm);
1940  *      do_something()
1941  *      get_user_pages(..., pages, NULL);
1942  *      mmap_read_unlock(mm);
1943  *
1944  *  with:
1945  *
1946  *      int locked = 1;
1947  *      mmap_read_lock(mm);
1948  *      do_something()
1949  *      get_user_pages_locked(..., pages, &locked);
1950  *      if (locked)
1951  *          mmap_read_unlock(mm);
1952  *
1953  * We can leverage the VM_FAULT_RETRY functionality in the page fault
1954  * paths better by using either get_user_pages_locked() or
1955  * get_user_pages_unlocked().
1956  *
1957  */
1958 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1959 			   unsigned int gup_flags, struct page **pages,
1960 			   int *locked)
1961 {
1962 	/*
1963 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
1964 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1965 	 * vmas.  As there are no users of this flag in this call we simply
1966 	 * disallow this option for now.
1967 	 */
1968 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1969 		return -EINVAL;
1970 	/*
1971 	 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1972 	 * never directly by the caller, so enforce that:
1973 	 */
1974 	if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1975 		return -EINVAL;
1976 
1977 	return __get_user_pages_locked(current->mm, start, nr_pages,
1978 				       pages, NULL, locked,
1979 				       gup_flags | FOLL_TOUCH);
1980 }
1981 EXPORT_SYMBOL(get_user_pages_locked);
1982 
1983 /*
1984  * get_user_pages_unlocked() is suitable to replace the form:
1985  *
1986  *      mmap_read_lock(mm);
1987  *      get_user_pages(..., pages, NULL);
1988  *      mmap_read_unlock(mm);
1989  *
1990  *  with:
1991  *
1992  *      get_user_pages_unlocked(..., pages);
1993  *
1994  * It is functionally equivalent to get_user_pages_fast so
1995  * get_user_pages_fast should be used instead if specific gup_flags
1996  * (e.g. FOLL_FORCE) are not required.
1997  */
1998 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1999 			     struct page **pages, unsigned int gup_flags)
2000 {
2001 	struct mm_struct *mm = current->mm;
2002 	int locked = 1;
2003 	long ret;
2004 
2005 	/*
2006 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2007 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2008 	 * vmas.  As there are no users of this flag in this call we simply
2009 	 * disallow this option for now.
2010 	 */
2011 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2012 		return -EINVAL;
2013 
2014 	mmap_read_lock(mm);
2015 	ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
2016 				      &locked, gup_flags | FOLL_TOUCH);
2017 	if (locked)
2018 		mmap_read_unlock(mm);
2019 	return ret;
2020 }
2021 EXPORT_SYMBOL(get_user_pages_unlocked);
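
/*
 * Illustrative sketch, not taken from an in-tree caller:
 * get_user_pages_unlocked() takes and drops mmap_lock itself, so the caller
 * only supplies the range and releases the pages afterwards (uaddr and pages
 * are hypothetical caller-side variables):
 *
 *	long nr;
 *
 *	nr = get_user_pages_unlocked(uaddr, 2, pages, FOLL_WRITE);
 *	while (nr-- > 0)
 *		put_page(pages[nr]);
 */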
2022 
2023 /*
2024  * Fast GUP
2025  *
2026  * get_user_pages_fast attempts to pin user pages by walking the page
2027  * tables directly and avoids taking locks. Thus the walker needs to be
2028  * protected from page table pages being freed from under it, and should
2029  * block any THP splits.
2030  *
2031  * One way to achieve this is to have the walker disable interrupts, and
2032  * rely on IPIs from the TLB flushing code blocking before the page table
2033  * pages are freed. This is unsuitable for architectures that do not need
2034  * to broadcast an IPI when invalidating TLBs.
2035  *
2036  * Another way to achieve this is to batch up pages that contain page tables
2037  * belonging to more than one mm_user, then rcu_sched a callback to free those
2038  * pages. Disabling interrupts will allow the fast_gup walker to both block
2039  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2040  * (which is a relatively rare event). The code below adopts this strategy.
2041  *
2042  * Before activating this code, please be aware that the following assumptions
2043  * are currently made:
2044  *
2045  *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled and tlb_remove_table() is used
2046  *  to free pages containing page tables, or TLB flushing requires IPI broadcast.
2047  *
2048  *  *) ptes can be read atomically by the architecture.
2049  *
2050  *  *) access_ok is sufficient to validate userspace address ranges.
2051  *
2052  * The last two assumptions can be relaxed by the addition of helper functions.
2053  *
2054  * This code is based heavily on the PowerPC implementation by Nick Piggin.
2055  */
2056 #ifdef CONFIG_HAVE_FAST_GUP
2057 
2058 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2059 					    unsigned int flags,
2060 					    struct page **pages)
2061 {
2062 	while ((*nr) - nr_start) {
2063 		struct page *page = pages[--(*nr)];
2064 
2065 		ClearPageReferenced(page);
2066 		if (flags & FOLL_PIN)
2067 			unpin_user_page(page);
2068 		else
2069 			put_page(page);
2070 	}
2071 }
2072 
2073 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2074 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2075 			 unsigned int flags, struct page **pages, int *nr)
2076 {
2077 	struct dev_pagemap *pgmap = NULL;
2078 	int nr_start = *nr, ret = 0;
2079 	pte_t *ptep, *ptem;
2080 
2081 	ptem = ptep = pte_offset_map(&pmd, addr);
2082 	do {
2083 		pte_t pte = ptep_get_lockless(ptep);
2084 		struct page *head, *page;
2085 
2086 		/*
2087 		 * Similar to the PMD case below, NUMA hinting must take slow
2088 		 * path using the pte_protnone check.
2089 		 */
2090 		if (pte_protnone(pte))
2091 			goto pte_unmap;
2092 
2093 		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2094 			goto pte_unmap;
2095 
2096 		if (pte_devmap(pte)) {
2097 			if (unlikely(flags & FOLL_LONGTERM))
2098 				goto pte_unmap;
2099 
2100 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2101 			if (unlikely(!pgmap)) {
2102 				undo_dev_pagemap(nr, nr_start, flags, pages);
2103 				goto pte_unmap;
2104 			}
2105 		} else if (pte_special(pte))
2106 			goto pte_unmap;
2107 
2108 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2109 		page = pte_page(pte);
2110 
2111 		head = try_grab_compound_head(page, 1, flags);
2112 		if (!head)
2113 			goto pte_unmap;
2114 
2115 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2116 			put_compound_head(head, 1, flags);
2117 			goto pte_unmap;
2118 		}
2119 
2120 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
2121 
2122 		/*
2123 		 * We need to make the page accessible if and only if we are
2124 		 * going to access its content (the FOLL_PIN case).  Please
2125 		 * see Documentation/core-api/pin_user_pages.rst for
2126 		 * details.
2127 		 */
2128 		if (flags & FOLL_PIN) {
2129 			ret = arch_make_page_accessible(page);
2130 			if (ret) {
2131 				unpin_user_page(page);
2132 				goto pte_unmap;
2133 			}
2134 		}
2135 		SetPageReferenced(page);
2136 		pages[*nr] = page;
2137 		(*nr)++;
2138 
2139 	} while (ptep++, addr += PAGE_SIZE, addr != end);
2140 
2141 	ret = 1;
2142 
2143 pte_unmap:
2144 	if (pgmap)
2145 		put_dev_pagemap(pgmap);
2146 	pte_unmap(ptem);
2147 	return ret;
2148 }
2149 #else
2150 
2151 /*
2152  * If we can't determine whether or not a pte is special, then fail immediately
2153  * for ptes. Note that we can still pin HugeTLB and THP, as these are guaranteed
2154  * not to be special.
2155  *
2156  * For a futex to be placed on a THP tail page, get_futex_key requires a
2157  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2158  * useful to have gup_huge_pmd even if we can't operate on ptes.
2159  */
2160 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2161 			 unsigned int flags, struct page **pages, int *nr)
2162 {
2163 	return 0;
2164 }
2165 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2166 
2167 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2168 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2169 			     unsigned long end, unsigned int flags,
2170 			     struct page **pages, int *nr)
2171 {
2172 	int nr_start = *nr;
2173 	struct dev_pagemap *pgmap = NULL;
2174 
2175 	do {
2176 		struct page *page = pfn_to_page(pfn);
2177 
2178 		pgmap = get_dev_pagemap(pfn, pgmap);
2179 		if (unlikely(!pgmap)) {
2180 			undo_dev_pagemap(nr, nr_start, flags, pages);
2181 			return 0;
2182 		}
2183 		SetPageReferenced(page);
2184 		pages[*nr] = page;
2185 		if (unlikely(!try_grab_page(page, flags))) {
2186 			undo_dev_pagemap(nr, nr_start, flags, pages);
2187 			return 0;
2188 		}
2189 		(*nr)++;
2190 		pfn++;
2191 	} while (addr += PAGE_SIZE, addr != end);
2192 
2193 	if (pgmap)
2194 		put_dev_pagemap(pgmap);
2195 	return 1;
2196 }
2197 
2198 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2199 				 unsigned long end, unsigned int flags,
2200 				 struct page **pages, int *nr)
2201 {
2202 	unsigned long fault_pfn;
2203 	int nr_start = *nr;
2204 
2205 	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2206 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2207 		return 0;
2208 
2209 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2210 		undo_dev_pagemap(nr, nr_start, flags, pages);
2211 		return 0;
2212 	}
2213 	return 1;
2214 }
2215 
2216 static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2217 				 unsigned long end, unsigned int flags,
2218 				 struct page **pages, int *nr)
2219 {
2220 	unsigned long fault_pfn;
2221 	int nr_start = *nr;
2222 
2223 	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2224 	if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2225 		return 0;
2226 
2227 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2228 		undo_dev_pagemap(nr, nr_start, flags, pages);
2229 		return 0;
2230 	}
2231 	return 1;
2232 }
2233 #else
2234 static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2235 				 unsigned long end, unsigned int flags,
2236 				 struct page **pages, int *nr)
2237 {
2238 	BUILD_BUG();
2239 	return 0;
2240 }
2241 
2242 static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2243 				 unsigned long end, unsigned int flags,
2244 				 struct page **pages, int *nr)
2245 {
2246 	BUILD_BUG();
2247 	return 0;
2248 }
2249 #endif
2250 
2251 static int record_subpages(struct page *page, unsigned long addr,
2252 			   unsigned long end, struct page **pages)
2253 {
2254 	int nr;
2255 
2256 	for (nr = 0; addr != end; addr += PAGE_SIZE)
2257 		pages[nr++] = page++;
2258 
2259 	return nr;
2260 }
2261 
2262 #ifdef CONFIG_ARCH_HAS_HUGEPD
2263 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2264 				      unsigned long sz)
2265 {
2266 	unsigned long __boundary = (addr + sz) & ~(sz-1);
2267 	return (__boundary - 1 < end - 1) ? __boundary : end;
2268 }
2269 
2270 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2271 		       unsigned long end, unsigned int flags,
2272 		       struct page **pages, int *nr)
2273 {
2274 	unsigned long pte_end;
2275 	struct page *head, *page;
2276 	pte_t pte;
2277 	int refs;
2278 
2279 	pte_end = (addr + sz) & ~(sz-1);
2280 	if (pte_end < end)
2281 		end = pte_end;
2282 
2283 	pte = huge_ptep_get(ptep);
2284 
2285 	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2286 		return 0;
2287 
2288 	/* hugepages are never "special" */
2289 	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2290 
2291 	head = pte_page(pte);
2292 	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2293 	refs = record_subpages(page, addr, end, pages + *nr);
2294 
2295 	head = try_grab_compound_head(head, refs, flags);
2296 	if (!head)
2297 		return 0;
2298 
2299 	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2300 		put_compound_head(head, refs, flags);
2301 		return 0;
2302 	}
2303 
2304 	*nr += refs;
2305 	SetPageReferenced(head);
2306 	return 1;
2307 }
2308 
2309 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2310 		unsigned int pdshift, unsigned long end, unsigned int flags,
2311 		struct page **pages, int *nr)
2312 {
2313 	pte_t *ptep;
2314 	unsigned long sz = 1UL << hugepd_shift(hugepd);
2315 	unsigned long next;
2316 
2317 	ptep = hugepte_offset(hugepd, addr, pdshift);
2318 	do {
2319 		next = hugepte_addr_end(addr, end, sz);
2320 		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2321 			return 0;
2322 	} while (ptep++, addr = next, addr != end);
2323 
2324 	return 1;
2325 }
2326 #else
2327 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2328 		unsigned int pdshift, unsigned long end, unsigned int flags,
2329 		struct page **pages, int *nr)
2330 {
2331 	return 0;
2332 }
2333 #endif /* CONFIG_ARCH_HAS_HUGEPD */
2334 
2335 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2336 			unsigned long end, unsigned int flags,
2337 			struct page **pages, int *nr)
2338 {
2339 	struct page *head, *page;
2340 	int refs;
2341 
2342 	if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2343 		return 0;
2344 
2345 	if (pmd_devmap(orig)) {
2346 		if (unlikely(flags & FOLL_LONGTERM))
2347 			return 0;
2348 		return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2349 					     pages, nr);
2350 	}
2351 
2352 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2353 	refs = record_subpages(page, addr, end, pages + *nr);
2354 
2355 	head = try_grab_compound_head(pmd_page(orig), refs, flags);
2356 	if (!head)
2357 		return 0;
2358 
2359 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2360 		put_compound_head(head, refs, flags);
2361 		return 0;
2362 	}
2363 
2364 	*nr += refs;
2365 	SetPageReferenced(head);
2366 	return 1;
2367 }
2368 
2369 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2370 			unsigned long end, unsigned int flags,
2371 			struct page **pages, int *nr)
2372 {
2373 	struct page *head, *page;
2374 	int refs;
2375 
2376 	if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2377 		return 0;
2378 
2379 	if (pud_devmap(orig)) {
2380 		if (unlikely(flags & FOLL_LONGTERM))
2381 			return 0;
2382 		return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2383 					     pages, nr);
2384 	}
2385 
2386 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2387 	refs = record_subpages(page, addr, end, pages + *nr);
2388 
2389 	head = try_grab_compound_head(pud_page(orig), refs, flags);
2390 	if (!head)
2391 		return 0;
2392 
2393 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2394 		put_compound_head(head, refs, flags);
2395 		return 0;
2396 	}
2397 
2398 	*nr += refs;
2399 	SetPageReferenced(head);
2400 	return 1;
2401 }
2402 
2403 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2404 			unsigned long end, unsigned int flags,
2405 			struct page **pages, int *nr)
2406 {
2407 	int refs;
2408 	struct page *head, *page;
2409 
2410 	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2411 		return 0;
2412 
2413 	BUILD_BUG_ON(pgd_devmap(orig));
2414 
2415 	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2416 	refs = record_subpages(page, addr, end, pages + *nr);
2417 
2418 	head = try_grab_compound_head(pgd_page(orig), refs, flags);
2419 	if (!head)
2420 		return 0;
2421 
2422 	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2423 		put_compound_head(head, refs, flags);
2424 		return 0;
2425 	}
2426 
2427 	*nr += refs;
2428 	SetPageReferenced(head);
2429 	return 1;
2430 }
2431 
2432 static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2433 		unsigned int flags, struct page **pages, int *nr)
2434 {
2435 	unsigned long next;
2436 	pmd_t *pmdp;
2437 
2438 	pmdp = pmd_offset_lockless(pudp, pud, addr);
2439 	do {
2440 		pmd_t pmd = READ_ONCE(*pmdp);
2441 
2442 		next = pmd_addr_end(addr, end);
2443 		if (!pmd_present(pmd))
2444 			return 0;
2445 
2446 		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2447 			     pmd_devmap(pmd))) {
2448 			/*
2449 			 * NUMA hinting faults need to be handled in the GUP
2450 			 * slowpath for accounting purposes and so that they
2451 			 * can be serialised against THP migration.
2452 			 */
2453 			if (pmd_protnone(pmd))
2454 				return 0;
2455 
2456 			if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2457 				pages, nr))
2458 				return 0;
2459 
2460 		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2461 			/*
2462 			 * Architectures can use different formats for the
2463 			 * hugetlbfs PMD and the THP PMD.
2464 			 */
2465 			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2466 					 PMD_SHIFT, next, flags, pages, nr))
2467 				return 0;
2468 		} else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2469 			return 0;
2470 	} while (pmdp++, addr = next, addr != end);
2471 
2472 	return 1;
2473 }
2474 
2475 static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2476 			 unsigned int flags, struct page **pages, int *nr)
2477 {
2478 	unsigned long next;
2479 	pud_t *pudp;
2480 
2481 	pudp = pud_offset_lockless(p4dp, p4d, addr);
2482 	do {
2483 		pud_t pud = READ_ONCE(*pudp);
2484 
2485 		next = pud_addr_end(addr, end);
2486 		if (unlikely(!pud_present(pud)))
2487 			return 0;
2488 		if (unlikely(pud_huge(pud))) {
2489 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
2490 					  pages, nr))
2491 				return 0;
2492 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2493 			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2494 					 PUD_SHIFT, next, flags, pages, nr))
2495 				return 0;
2496 		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2497 			return 0;
2498 	} while (pudp++, addr = next, addr != end);
2499 
2500 	return 1;
2501 }
2502 
2503 static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2504 			 unsigned int flags, struct page **pages, int *nr)
2505 {
2506 	unsigned long next;
2507 	p4d_t *p4dp;
2508 
2509 	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2510 	do {
2511 		p4d_t p4d = READ_ONCE(*p4dp);
2512 
2513 		next = p4d_addr_end(addr, end);
2514 		if (p4d_none(p4d))
2515 			return 0;
2516 		BUILD_BUG_ON(p4d_huge(p4d));
2517 		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2518 			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2519 					 P4D_SHIFT, next, flags, pages, nr))
2520 				return 0;
2521 		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2522 			return 0;
2523 	} while (p4dp++, addr = next, addr != end);
2524 
2525 	return 1;
2526 }
2527 
2528 static void gup_pgd_range(unsigned long addr, unsigned long end,
2529 		unsigned int flags, struct page **pages, int *nr)
2530 {
2531 	unsigned long next;
2532 	pgd_t *pgdp;
2533 
2534 	pgdp = pgd_offset(current->mm, addr);
2535 	do {
2536 		pgd_t pgd = READ_ONCE(*pgdp);
2537 
2538 		next = pgd_addr_end(addr, end);
2539 		if (pgd_none(pgd))
2540 			return;
2541 		if (unlikely(pgd_huge(pgd))) {
2542 			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2543 					  pages, nr))
2544 				return;
2545 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2546 			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2547 					 PGDIR_SHIFT, next, flags, pages, nr))
2548 				return;
2549 		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2550 			return;
2551 	} while (pgdp++, addr = next, addr != end);
2552 }
2553 #else
2554 static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2555 		unsigned int flags, struct page **pages, int *nr)
2556 {
2557 }
2558 #endif /* CONFIG_HAVE_FAST_GUP */
2559 
2560 #ifndef gup_fast_permitted
2561 /*
2562  * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2563  * we need to fall back to the slow version:
2564  */
2565 static bool gup_fast_permitted(unsigned long start, unsigned long end)
2566 {
2567 	return true;
2568 }
2569 #endif
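
/*
 * A hypothetical architecture override of gup_fast_permitted(), sketched only
 * to illustrate the hook; the limit check shown is an assumption, not copied
 * from any particular architecture:
 *
 *	#define gup_fast_permitted gup_fast_permitted
 *	static inline bool gup_fast_permitted(unsigned long start,
 *					      unsigned long end)
 *	{
 *		return end <= TASK_SIZE_MAX;
 *	}
 */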
2570 
2571 static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2572 				   unsigned int gup_flags, struct page **pages)
2573 {
2574 	int ret;
2575 
2576 	/*
2577 	 * FIXME: FOLL_LONGTERM does not work with
2578 	 * get_user_pages_unlocked() (see comments in that function)
2579 	 */
2580 	if (gup_flags & FOLL_LONGTERM) {
2581 		mmap_read_lock(current->mm);
2582 		ret = __gup_longterm_locked(current->mm,
2583 					    start, nr_pages,
2584 					    pages, NULL, gup_flags);
2585 		mmap_read_unlock(current->mm);
2586 	} else {
2587 		ret = get_user_pages_unlocked(start, nr_pages,
2588 					      pages, gup_flags);
2589 	}
2590 
2591 	return ret;
2592 }
2593 
2594 static unsigned long lockless_pages_from_mm(unsigned long start,
2595 					    unsigned long end,
2596 					    unsigned int gup_flags,
2597 					    struct page **pages)
2598 {
2599 	unsigned long flags;
2600 	int nr_pinned = 0;
2601 	unsigned seq;
2602 
2603 	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2604 	    !gup_fast_permitted(start, end))
2605 		return 0;
2606 
2607 	if (gup_flags & FOLL_PIN) {
2608 		seq = raw_read_seqcount(&current->mm->write_protect_seq);
2609 		if (seq & 1)
2610 			return 0;
2611 	}
2612 
2613 	/*
2614 	 * Disable interrupts. The nested form is used, in order to allow full,
2615 	 * general purpose use of this routine.
2616 	 *
2617 	 * With interrupts disabled, we block page table pages from being freed
2618 	 * from under us. See struct mmu_table_batch comments in
2619 	 * include/asm-generic/tlb.h for more details.
2620 	 *
2621 	 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2622 	 * that come from THPs splitting.
2623 	 */
2624 	local_irq_save(flags);
2625 	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2626 	local_irq_restore(flags);
2627 
2628 	/*
2629 	 * When pinning pages for DMA there could be a concurrent write protect
2630 	 * from fork() via copy_page_range(), in this case always fail fast GUP.
2631 	 */
2632 	if (gup_flags & FOLL_PIN) {
2633 		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2634 			unpin_user_pages(pages, nr_pinned);
2635 			return 0;
2636 		}
2637 	}
2638 	return nr_pinned;
2639 }
2640 
2641 static int internal_get_user_pages_fast(unsigned long start,
2642 					unsigned long nr_pages,
2643 					unsigned int gup_flags,
2644 					struct page **pages)
2645 {
2646 	unsigned long len, end;
2647 	unsigned long nr_pinned;
2648 	int ret;
2649 
2650 	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2651 				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
2652 				       FOLL_FAST_ONLY)))
2653 		return -EINVAL;
2654 
2655 	if (gup_flags & FOLL_PIN)
2656 		mm_set_has_pinned_flag(&current->mm->flags);
2657 
2658 	if (!(gup_flags & FOLL_FAST_ONLY))
2659 		might_lock_read(&current->mm->mmap_lock);
2660 
2661 	start = untagged_addr(start) & PAGE_MASK;
2662 	len = nr_pages << PAGE_SHIFT;
2663 	if (check_add_overflow(start, len, &end))
2664 		return 0;
2665 	if (unlikely(!access_ok((void __user *)start, len)))
2666 		return -EFAULT;
2667 
2668 	nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2669 	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2670 		return nr_pinned;
2671 
2672 	/* Slow path: try to get the remaining pages with get_user_pages */
2673 	start += nr_pinned << PAGE_SHIFT;
2674 	pages += nr_pinned;
2675 	ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2676 				      pages);
2677 	if (ret < 0) {
2678 		/*
2679 		 * The caller has to unpin the pages we already pinned so
2680 		 * The caller has to unpin the pages we already pinned, so
2681 		 * returning -errno is not an option.
2682 		if (nr_pinned)
2683 			return nr_pinned;
2684 		return ret;
2685 	}
2686 	return ret + nr_pinned;
2687 }
2688 
2689 /**
2690  * get_user_pages_fast_only() - pin user pages in memory
2691  * @start:      starting user address
2692  * @nr_pages:   number of pages from start to pin
2693  * @gup_flags:  flags modifying pin behaviour
2694  * @pages:      array that receives pointers to the pages pinned.
2695  *              Should be at least nr_pages long.
2696  *
2697  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2698  * the regular GUP.
2699  * Note a difference with get_user_pages_fast: this always returns the
2700  * number of pages pinned, or 0 if no pages were pinned.
2701  *
2702  * If the architecture does not support this function, simply return with no
2703  * pages pinned.
2704  *
2705  * Careful, careful! COW breaking can go either way, so a non-write
2706  * access can get ambiguous page results. If you call this function without
2707  * 'write' set, you'd better be sure that you're ok with that ambiguity.
2708  */
2709 int get_user_pages_fast_only(unsigned long start, int nr_pages,
2710 			     unsigned int gup_flags, struct page **pages)
2711 {
2712 	int nr_pinned;
2713 	/*
2714 	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2715 	 * because gup fast is always a "pin with a +1 page refcount" request.
2716 	 *
2717 	 * FOLL_FAST_ONLY is required in order to match the API description of
2718 	 * this routine: no fall back to regular ("slow") GUP.
2719 	 */
2720 	gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2721 
2722 	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2723 						 pages);
2724 
2725 	/*
2726 	 * As specified in the API description above, this routine is not
2727 	 * allowed to return negative values. However, the common core
2728 	 * routine internal_get_user_pages_fast() *can* return -errno.
2729 	 * Therefore, correct for that here:
2730 	 */
2731 	if (nr_pinned < 0)
2732 		nr_pinned = 0;
2733 
2734 	return nr_pinned;
2735 }
2736 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
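
/*
 * Illustrative sketch, not taken from an in-tree caller: because this variant
 * never falls back to the slow path, a caller that must not sleep can try it
 * first and defer any blocking fallback (uaddr and page are hypothetical):
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page)) {
 *		... use the page, then ...
 *		put_page(page);
 *	} else {
 *		... punt to a context that may sleep and use
 *		    get_user_pages_fast() or get_user_pages() there ...
 *	}
 */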
2737 
2738 /**
2739  * get_user_pages_fast() - pin user pages in memory
2740  * @start:      starting user address
2741  * @nr_pages:   number of pages from start to pin
2742  * @gup_flags:  flags modifying pin behaviour
2743  * @pages:      array that receives pointers to the pages pinned.
2744  *              Should be at least nr_pages long.
2745  *
2746  * Attempt to pin user pages in memory without taking mm->mmap_lock.
2747  * If not successful, it will fall back to taking the lock and
2748  * calling get_user_pages().
2749  *
2750  * Returns number of pages pinned. This may be fewer than the number requested.
2751  * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2752  * -errno.
2753  */
2754 int get_user_pages_fast(unsigned long start, int nr_pages,
2755 			unsigned int gup_flags, struct page **pages)
2756 {
2757 	if (!is_valid_gup_flags(gup_flags))
2758 		return -EINVAL;
2759 
2760 	/*
2761 	 * The caller may or may not have explicitly set FOLL_GET; either way is
2762 	 * OK. However, internally (within mm/gup.c), gup fast variants must set
2763 	 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2764 	 * request.
2765 	 */
2766 	gup_flags |= FOLL_GET;
2767 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2768 }
2769 EXPORT_SYMBOL_GPL(get_user_pages_fast);
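
/*
 * Illustrative sketch, not taken from an in-tree caller: a short-lived
 * "get, use, put" sequence; no mmap_lock is taken by the caller (buf and
 * pages are hypothetical):
 *
 *	int nr;
 *
 *	nr = get_user_pages_fast((unsigned long)buf, 8, FOLL_WRITE, pages);
 *	if (nr < 0)
 *		return nr;
 *	... do I/O into pages[0..nr-1] ...
 *	while (nr--)
 *		put_page(pages[nr]);
 */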
2770 
2771 /**
2772  * pin_user_pages_fast() - pin user pages in memory without taking locks
2773  *
2774  * @start:      starting user address
2775  * @nr_pages:   number of pages from start to pin
2776  * @gup_flags:  flags modifying pin behaviour
2777  * @pages:      array that receives pointers to the pages pinned.
2778  *              Should be at least nr_pages long.
2779  *
2780  * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2781  * get_user_pages_fast() for documentation on the function arguments, because
2782  * the arguments here are identical.
2783  *
2784  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2785  * see Documentation/core-api/pin_user_pages.rst for further details.
2786  */
2787 int pin_user_pages_fast(unsigned long start, int nr_pages,
2788 			unsigned int gup_flags, struct page **pages)
2789 {
2790 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2791 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2792 		return -EINVAL;
2793 
2794 	gup_flags |= FOLL_PIN;
2795 	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2796 }
2797 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
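
/*
 * Illustrative sketch, not taken from an in-tree caller: a DMA-style user of
 * pin_user_pages_fast() must balance the pin with unpin_user_page*(), never
 * with put_page() (uaddr and pages are hypothetical):
 *
 *	int nr;
 *
 *	nr = pin_user_pages_fast(uaddr, 16, FOLL_WRITE, pages);
 *	if (nr <= 0)
 *		return nr ? nr : -EFAULT;
 *	... set up and run DMA on pages[0..nr-1] ...
 *	unpin_user_pages_dirty_lock(pages, nr, true);
 */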
2798 
2799 /*
2800  * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2801  * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
2802  *
2803  * The API rules are the same, too: no negative values may be returned.
2804  */
2805 int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2806 			     unsigned int gup_flags, struct page **pages)
2807 {
2808 	int nr_pinned;
2809 
2810 	/*
2811 	 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2812 	 * rules require returning 0, rather than -errno:
2813 	 */
2814 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2815 		return 0;
2816 	/*
2817 	 * FOLL_FAST_ONLY is required in order to match the API description of
2818 	 * this routine: no fall back to regular ("slow") GUP.
2819 	 */
2820 	gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2821 	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2822 						 pages);
2823 	/*
2824 	 * This routine is not allowed to return negative values. However,
2825 	 * internal_get_user_pages_fast() *can* return -errno. Therefore,
2826 	 * correct for that here:
2827 	 */
2828 	if (nr_pinned < 0)
2829 		nr_pinned = 0;
2830 
2831 	return nr_pinned;
2832 }
2833 EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
2834 
2835 /**
2836  * pin_user_pages_remote() - pin pages of a remote process
2837  *
2838  * @mm:		mm_struct of target mm
2839  * @start:	starting user address
2840  * @nr_pages:	number of pages from start to pin
2841  * @gup_flags:	flags modifying lookup behaviour
2842  * @pages:	array that receives pointers to the pages pinned.
2843  *		Should be at least nr_pages long. Or NULL, if caller
2844  *		only intends to ensure the pages are faulted in.
2845  * @vmas:	array of pointers to vmas corresponding to each page.
2846  *		Or NULL if the caller does not require them.
2847  * @locked:	pointer to lock flag indicating whether lock is held and
2848  *		subsequently whether VM_FAULT_RETRY functionality can be
2849  *		utilised. Lock must initially be held.
2850  *
2851  * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2852  * get_user_pages_remote() for documentation on the function arguments, because
2853  * the arguments here are identical.
2854  *
2855  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2856  * see Documentation/core-api/pin_user_pages.rst for details.
2857  */
2858 long pin_user_pages_remote(struct mm_struct *mm,
2859 			   unsigned long start, unsigned long nr_pages,
2860 			   unsigned int gup_flags, struct page **pages,
2861 			   struct vm_area_struct **vmas, int *locked)
2862 {
2863 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2864 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2865 		return -EINVAL;
2866 
2867 	gup_flags |= FOLL_PIN;
2868 	return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2869 				       pages, vmas, locked);
2870 }
2871 EXPORT_SYMBOL(pin_user_pages_remote);
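
/*
 * Illustrative sketch, not taken from an in-tree caller: the calling pattern
 * mirrors get_user_pages_remote(), but release is via unpin_user_pages()
 * (mm, uaddr and pages are hypothetical):
 *
 *	long pinned;
 *
 *	mmap_read_lock(mm);
 *	pinned = pin_user_pages_remote(mm, uaddr, 4, FOLL_WRITE, pages,
 *				       NULL, NULL);
 *	mmap_read_unlock(mm);
 *	if (pinned > 0)
 *		unpin_user_pages(pages, pinned);
 */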
2872 
2873 /**
2874  * pin_user_pages() - pin user pages in memory for use by other devices
2875  *
2876  * @start:	starting user address
2877  * @nr_pages:	number of pages from start to pin
2878  * @gup_flags:	flags modifying lookup behaviour
2879  * @pages:	array that receives pointers to the pages pinned.
2880  *		Should be at least nr_pages long. Or NULL, if caller
2881  *		only intends to ensure the pages are faulted in.
2882  * @vmas:	array of pointers to vmas corresponding to each page.
2883  *		Or NULL if the caller does not require them.
2884  *
2885  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2886  * FOLL_PIN is set.
2887  *
2888  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2889  * see Documentation/core-api/pin_user_pages.rst for details.
2890  */
2891 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2892 		    unsigned int gup_flags, struct page **pages,
2893 		    struct vm_area_struct **vmas)
2894 {
2895 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2896 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2897 		return -EINVAL;
2898 
2899 	gup_flags |= FOLL_PIN;
2900 	return __gup_longterm_locked(current->mm, start, nr_pages,
2901 				     pages, vmas, gup_flags);
2902 }
2903 EXPORT_SYMBOL(pin_user_pages);
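
/*
 * Illustrative sketch, not taken from an in-tree caller: a long-lived pin
 * (e.g. registering a user buffer for device DMA) typically adds
 * FOLL_LONGTERM and later releases the pages with unpin_user_pages()
 * (uaddr, npages and pages are hypothetical):
 *
 *	long pinned;
 *
 *	mmap_read_lock(current->mm);
 *	pinned = pin_user_pages(uaddr, npages, FOLL_WRITE | FOLL_LONGTERM,
 *				pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (pinned < 0)
 *		return pinned;
 *	... use the pages for DMA ...
 *	unpin_user_pages(pages, pinned);
 */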
2904 
2905 /*
2906  * pin_user_pages_unlocked() is the FOLL_PIN variant of
2907  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
2908  * FOLL_PIN and rejects FOLL_GET.
2909  */
2910 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2911 			     struct page **pages, unsigned int gup_flags)
2912 {
2913 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2914 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2915 		return -EINVAL;
2916 
2917 	gup_flags |= FOLL_PIN;
2918 	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
2919 }
2920 EXPORT_SYMBOL(pin_user_pages_unlocked);
2921 
2922 /*
2923  * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
2924  * Behavior is the same, except that this one sets FOLL_PIN and rejects
2925  * FOLL_GET.
2926  */
2927 long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
2928 			   unsigned int gup_flags, struct page **pages,
2929 			   int *locked)
2930 {
2931 	/*
2932 	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2933 	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2934 	 * vmas.  As there are no users of this flag in this call we simply
2935 	 * disallow this option for now.
2936 	 */
2937 	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2938 		return -EINVAL;
2939 
2940 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
2941 	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2942 		return -EINVAL;
2943 
2944 	gup_flags |= FOLL_PIN;
2945 	return __get_user_pages_locked(current->mm, start, nr_pages,
2946 				       pages, NULL, locked,
2947 				       gup_flags | FOLL_TOUCH);
2948 }
2949 EXPORT_SYMBOL(pin_user_pages_locked);
2950